import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
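
# A minimal usage sketch (illustrative, not part of the original module): this builder
# backs the packaged "csv" loader, so it is normally reached through `load_dataset`.
# The file path and separator below are hypothetical.
#
#   import datasets
#   dset = datasets.load_dataset("csv", data_files={"train": "data/train.csv"}, sep=";")
#   print(dset["train"].features)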
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Find the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """
        Find the position of the last mismatch between the pattern and the text
        window starting at `current_pos`, or -1 if the window matches.
        """
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
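
# Worked example (added for illustration): with text = "ABAABA" and pattern = "AB",
# the pattern matches at indices 0 and 3, so the script above prints:
#   Pattern found in following positions:
#   [0, 3]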
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """Parse CLI args, run generation, and optionally compute and save metrics."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
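
# Illustrative invocation for summarization (paths and model name are examples only):
#   python run_eval.py facebook/bart-large-cnn cnn_dm/test.source generations.txt \
#       --reference_path cnn_dm/test.target --score_path rouge.json --task summarization --bs 32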
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    """
    Adapted from RobertaTokenizer and XLNetTokenizer. Construct a CamemBERT
    tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
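
# A minimal usage sketch (illustrative, not part of the original module); it assumes
# the "camembert-base" checkpoint is reachable on the Hub or cached locally:
#
#   tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#   ids = tokenizer("J'aime le camembert !")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))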
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """
    Viterbi Algorithm: compute the most likely sequence of hidden states for the
    given sequence of observations.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
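
# Worked example (added for illustration), using the classic healthy/sick HMM:
#
#   observations = ["normal", "cold", "dizzy"]
#   states = ["healthy", "sick"]
#   start_p = {"healthy": 0.6, "sick": 0.4}
#   trans_p = {
#       "healthy": {"healthy": 0.7, "sick": 0.3},
#       "sick": {"healthy": 0.4, "sick": 0.6},
#   }
#   emit_p = {
#       "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#       "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#   }
#   viterbi(observations, states, start_p, trans_p, emit_p)
#   # -> ['healthy', 'healthy', 'sick']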
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """
    Print the first-order entropy, the second-order entropy, and the difference
    between the two, for the given text.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert text input into two dicts of counts: the frequencies of single
    characters and of two-character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
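
# Illustrative call (the printed values depend on the input text): passing a short
# English sentence to `calculate_prob` prints the first-order entropy, the
# second-order entropy, and their difference, each rounded to one decimal place.
#   calculate_prob("The quick brown fox jumps over the lazy dog")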
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE__ : Optional[int] = parse(importlib.metadata.version("torch"))
def A_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]:
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
a : Dict = STR_OPERATION_TO_FUNC[operation]
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
a : List[str] = parse(importlib.metadata.version(UpperCAmelCase__ ) )
return operation(UpperCAmelCase__ , parse(UpperCAmelCase__ ) )
def A_ ( UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
return compare_versions(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
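
# A minimal usage sketch (illustrative): `STR_OPERATION_TO_FUNC` maps operator strings
# such as ">=" to functions from the `operator` module, so both calls below are
# expected to return a bool:
#
#   compare_versions("numpy", ">=", "1.20.0")
#   is_torch_version("<", "2.0.0")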
"""simple docstring"""
from math import factorial, pi
def A_ ( UpperCAmelCase__ , UpperCAmelCase__ = 30 ) -> float:
if not isinstance(UpperCAmelCase__ , (int, float) ):
raise ValueError('maclaurin_sin() requires either an int or float for theta' )
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or accuracy <= 0:
raise ValueError('maclaurin_sin() requires a positive int for accuracy' )
a : Tuple = float(UpperCAmelCase__ )
a : Any = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(UpperCAmelCase__ ) )
def A_ ( UpperCAmelCase__ , UpperCAmelCase__ = 30 ) -> float:
if not isinstance(UpperCAmelCase__ , (int, float) ):
raise ValueError('maclaurin_cos() requires either an int or float for theta' )
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or accuracy <= 0:
raise ValueError('maclaurin_cos() requires a positive int for accuracy' )
a : Dict = float(UpperCAmelCase__ )
a : List[str] = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(UpperCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
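
# For reference (added for illustration): the series converges on the library values,
# e.g. maclaurin_sin(10) is approximately -0.544021 (math.sin(10)) and
# maclaurin_cos(5) is approximately 0.283662 (math.cos(5)).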
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the user profile JSON embedded in an Instagram profile page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
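
# A minimal usage sketch (illustrative, not part of the original module); it assumes
# the "facebook/rag-token-nq" checkpoint, which ships both sub-tokenizers:
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")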
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def UpperCAmelCase ( A : str ):
'''simple docstring'''
_UpperCAmelCase = SwinConfig()
_UpperCAmelCase = swin_name.split('_' )
_UpperCAmelCase = name_split[1]
_UpperCAmelCase = int(name_split[4] )
_UpperCAmelCase = int(name_split[3][-1] )
if model_size == "tiny":
_UpperCAmelCase = 96
_UpperCAmelCase = (2, 2, 6, 2)
_UpperCAmelCase = (3, 6, 12, 24)
elif model_size == "small":
_UpperCAmelCase = 96
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (3, 6, 12, 24)
elif model_size == "base":
_UpperCAmelCase = 128
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (4, 8, 16, 32)
else:
_UpperCAmelCase = 192
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (6, 12, 24, 48)
if "in22k" in swin_name:
_UpperCAmelCase = 2_1841
else:
_UpperCAmelCase = 1000
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(A ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
_UpperCAmelCase = img_size
_UpperCAmelCase = num_classes
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
return config
def UpperCAmelCase ( A : List[str] ):
'''simple docstring'''
if "patch_embed.proj" in name:
_UpperCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_UpperCAmelCase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
_UpperCAmelCase = 'encoder.' + name
if "attn.proj" in name:
_UpperCAmelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
_UpperCAmelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
_UpperCAmelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_UpperCAmelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_UpperCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_UpperCAmelCase = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
_UpperCAmelCase = 'layernorm.weight'
if name == "norm.bias":
_UpperCAmelCase = 'layernorm.bias'
if "head" in name:
_UpperCAmelCase = name.replace('head' , 'classifier' )
else:
_UpperCAmelCase = 'swin.' + name
return name
def UpperCAmelCase ( A : Dict , A : Tuple ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_UpperCAmelCase = orig_state_dict.pop(A )
if "mask" in key:
continue
elif "qkv" in key:
_UpperCAmelCase = key.split('.' )
_UpperCAmelCase = int(key_split[1] )
_UpperCAmelCase = int(key_split[3] )
_UpperCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_UpperCAmelCase = val[:dim, :]
_UpperCAmelCase = val[
dim : dim * 2, :
]
_UpperCAmelCase = val[-dim:, :]
else:
_UpperCAmelCase = val[
:dim
]
_UpperCAmelCase = val[
dim : dim * 2
]
_UpperCAmelCase = val[
-dim:
]
else:
_UpperCAmelCase = val
return orig_state_dict
def UpperCAmelCase ( A : Optional[Any] , A : List[str] ):
'''simple docstring'''
_UpperCAmelCase = timm.create_model(A , pretrained=A )
timm_model.eval()
_UpperCAmelCase = get_swin_config(A )
_UpperCAmelCase = SwinForImageClassification(A )
model.eval()
_UpperCAmelCase = convert_state_dict(timm_model.state_dict() , A )
model.load_state_dict(A )
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
_UpperCAmelCase = Image.open(requests.get(A , stream=A ).raw )
_UpperCAmelCase = image_processor(images=A , return_tensors='pt' )
_UpperCAmelCase = timm_model(inputs['pixel_values'] )
_UpperCAmelCase = model(**A ).logits
assert torch.allclose(A , A , atol=1e-3 )
print(f'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(A )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowercase = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
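
# Illustrative invocation (the checkpoint name must be a timm Swin variant whose name
# encodes size, window and resolution, as parsed by get_swin_config):
#   python convert_swin_timm_to_pytorch.py --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224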
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = DiTPipeline
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> str:
torch.manual_seed(0 )
_UpperCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]:
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = pipe(**snake_case ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case , 1E-3 )
def lowerCamelCase_ ( self ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
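
# Illustrative: the fast tests above run on CPU via pytest (the file path is
# indicative of the diffusers test layout, not guaranteed):
#   pytest tests/pipelines/dit/ -k "test_inference"
# The slow integration tests additionally require a CUDA GPU and RUN_SLOW=1.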
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    """
    Greedy (fractional knapsack) solution: take items in decreasing order of
    profit/weight until the weight limit is reached.
    """
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
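
# Worked example (added for illustration): with profit = [10, 9, 8],
# weight = [5, 4, 6] and max_weight = 25, the profit/weight ratios are
# [2.0, 2.25, 1.33...], items are taken greedily in the order 2, 1, 3, all of them
# fit (total weight 15 <= 25), and calc_profit returns 27 (10 + 9 + 8).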
"""simple docstring"""
def _snake_case ( lowercase__ = 1 , lowercase__ = 1000 ):
_lowerCamelCase : Optional[int] = 1
_lowerCamelCase : List[Any] = 0
for divide_by_number in range(lowercase__ , digit + 1 ):
_lowerCamelCase : list[int] = []
_lowerCamelCase : Dict = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(lowercase__ ):
_lowerCamelCase : Any = len(lowercase__ )
_lowerCamelCase : Any = divide_by_number
else:
has_been_divided.append(lowercase__ )
_lowerCamelCase : int = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
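
# Worked example (added for illustration): among denominators d <= 10, 1/7 has the
# longest recurring cycle (0.142857...), so solution(1, 10) returns 7. For the full
# Project Euler problem 26, solution(1, 1000) returns 983.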
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
__magic_name__ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__magic_name__ = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
        # fmt: on
        expected_boxes = __magic_name__  # added: same capture for the boxes list

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
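
        # Note (added): apply_ocr=True pulls in Tesseract (via pytesseract) at runtime; the
        # expected words/boxes above are pinned to Tesseract 4.1.1, so other Tesseract
        # versions may produce slightly different tokens and bounding boxes.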
| 184
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering / sequence classification and the other tasks
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        # LayoutLMv3 runs its own OCR by default; disable it so the dummy text/boxes below are used
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(processor(dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework))

        return inputs
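
# Added usage sketch (not part of the original file): how an OnnxConfig like the one
# above is typically consumed by the legacy `transformers.onnx` export helper.
# The checkpoint/processor names are assumptions for illustration only.
#
#   from pathlib import Path
#   from transformers import AutoProcessor, LayoutLMv3Model
#   from transformers.onnx import export
#
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#   model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
#   onnx_config = LayoutLMv3OnnxConfig(model.config)
#   export(processor, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))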
| 184
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            # Perceiver expects the tokenized ids under the generic "inputs" key
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 236
|
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = 'https://api.github.com'

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '/user'

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """
    Fetch GitHub info of a user using the requests module.
    """
    headers = {
        'Authorization': f"token {auth_token}",
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
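
# Added example invocation (the token is a placeholder; never hard-code a real one):
#   USER_TOKEN=ghp_your_token_here python fetch_github_info.py
# prints one `key: value` line per field of the authenticated user's profile.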
| 511
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['text', ['text']]
    outputs = ['text']

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"""This example is {label}""" for label in labels], return_tensors="pt", padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
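
# Added usage sketch, assuming the Transformers agents/tools runtime (illustrative only):
#
#   tool = TextClassificationTool()
#   label = tool("This new movie is awesome", ["positive", "negative"])
#   # PipelineTool.__call__ chains the encode -> forward -> decode methods defined above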
| 718
|
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """
    image: a grayscale PIL image object.
    Binarizes the image around its global mean intensity.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
    image.save('output_image_path')
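
# Added: an equivalent vectorized sketch with NumPy, for comparison (assumes a
# grayscale input; this helper is not part of the original algorithm file).
def mean_threshold_np(image: Image) -> Image:
    import numpy as np

    arr = np.asarray(image)
    # pixels above the global mean become white (255), the rest black (0)
    return Image.fromarray(((arr > arr.mean()) * 255).astype("uint8"))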
| 381
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GPTBigCodeForSequenceClassification''',
        '''GPTBigCodeForTokenClassification''',
        '''GPTBigCodeForCausalLM''',
        '''GPTBigCodeModel''',
        '''GPTBigCodePreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
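
# Added note: `_LazyModule` defers the torch-dependent imports until an attribute is
# first accessed, so e.g.
#   from transformers.models.gpt_bigcode import GPTBigCodeConfig
# only imports `configuration_gpt_bigcode` at that point, keeping `import transformers` fast.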
| 673
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict,
        # slicing the fused qkv matrix into thirds (target key names follow the HF DeiT layout)
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("""tiny"""):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("""small"""):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("""base"""):
        pass
    elif deit_name[4:].startswith("""large"""):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
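
# Added: typical invocation of this conversion script (paths are placeholders):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled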
| 673
| 1
|
from __future__ import annotations
import numpy as np
def relu(vector):
    # clamp negative entries to 0, leave non-negative entries unchanged
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 714
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow's C++ backend logging
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
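
# Added note: each block above only guards against ImportError, so a CPU-only torch
# build still prints cleanly (e.g. "Cuda version: None"), but any non-ImportError
# failure — say, from the NCCL query on platforms without NCCL — would escape this
# script's error handling.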
| 148
| 0
|
def solution(n: int = 10) -> str:
    """
    Returns the last `n` digits of 28433 * 2**7830457 + 1.
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(F"""{solution(10) = }""")
| 70
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(F'''{src_lang} is not a supported language.''')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'''{tgt_lang} is not a supported language.''')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="""pt""", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
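
# Added usage sketch, assuming the Transformers agents/tools runtime (illustrative only):
#
#   translator = TranslationTool()
#   translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")
#   # encode() maps the plain-English names through LANGUAGE_CODES to NLLB codes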
| 567
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
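
# Added: these tests can be run in a transformers checkout with, e.g.:
#   python -m pytest tests/models/blip/test_modeling_tf_blip_text.py -k test_model
# (path assumed from the module layout; requires TensorFlow to be installed)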
| 40
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
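
# Added example (repo and file names are illustrative):
#   url = hf_hub_url("user/my-dataset", "data/train.csv", revision="main")
#   # -> a https://huggingface.co/datasets/.../resolve/... URL built by huggingface_hub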
| 40
| 1
|
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a : str = logging.get_logger(__name__)
a : List[str] = '''▁'''
a : Any = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a : str = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
a : Tuple = {
'''facebook/m2m100_418M''': 1_0_2_4,
}
# fmt: off
a : List[str] = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("""additional_special_tokens""", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else """en"""
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"""{save_directory} should be a directory""")
        vocab_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
        )
        spm_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, """r""") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, """w""") as f:
        json.dump(data, f, indent=2)
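
# Added usage sketch (checkpoint name from the maps above; the flow is illustrative):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
#   batch = tokenizer("Hello world", return_tensors="pt")
#   # for generation, the target language is forced via
#   # forced_bos_token_id=tokenizer.get_lang_id("fr")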
| 555
|
"""simple docstring"""
import cva
import numpy as np
class a_ :
def __init__( self : Optional[Any] , __UpperCamelCase : float , __UpperCamelCase : int ) ->Dict:
'''simple docstring'''
if k in (0.0_4, 0.0_6):
_UpperCAmelCase = k
_UpperCAmelCase = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Tuple ) ->str:
'''simple docstring'''
return str(self.k )
def _snake_case ( self : str , __UpperCamelCase : str ) ->tuple[cva.Mat, list[list[int]]]:
'''simple docstring'''
_UpperCAmelCase = cva.imread(__UpperCamelCase , 0 )
_UpperCAmelCase ,_UpperCAmelCase = img.shape
_UpperCAmelCase = []
_UpperCAmelCase = img.copy()
_UpperCAmelCase = cva.cvtColor(__UpperCamelCase , cva.COLOR_GRAY2RGB )
_UpperCAmelCase ,_UpperCAmelCase = np.gradient(__UpperCamelCase )
_UpperCAmelCase = dx**2
_UpperCAmelCase = dy**2
_UpperCAmelCase = dx * dy
_UpperCAmelCase = self.k  # use the configured sensitivity instead of a hard-coded 0.0_4
_UpperCAmelCase = self.window_size // 2
for y in range(offset , h - offset ):
for x in range(offset , w - offset ):
_UpperCAmelCase = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_UpperCAmelCase = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_UpperCAmelCase = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_UpperCAmelCase = (wxx * wyy) - (wxy**2)
_UpperCAmelCase = wxx + wyy
_UpperCAmelCase = det - k * (trace**2)
# corner-response threshold; tune this value as needed
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
a : List[Any] = HarrisCorner(0.04, 3)
a , a : List[Any] = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
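# --- illustrative sketch (not part of the original file) -------------------
# The per-window response computed above is the Harris score
# r = det(M) - k * trace(M)^2 with M = [[Wxx, Wxy], [Wxy, Wyy]];
# a tiny NumPy check on a synthetic corner patch.
import numpy as np
patch = np.zeros((9, 9), dtype=float)
patch[4:, 4:] = 1.0  # bright quadrant -> a corner at (4, 4)
dy, dx = np.gradient(patch)
wxx, wyy, wxy = (dx * dx).sum(), (dy * dy).sum(), (dx * dy).sum()
r = (wxx * wyy - wxy ** 2) - 0.04 * (wxx + wyy) ** 2  # k = 0.04, as above
print(f"Harris response over the whole patch: {r:.4f}")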
| 555
| 1
|
"""simple docstring"""
__lowerCAmelCase : Any = range(2, 20 + 1)
__lowerCAmelCase : Optional[int] = [10**k for k in range(ks[-1] + 1)]
__lowerCAmelCase : dict[int, dict[int, list[list[int]]]] = {}
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
snake_case_ : str = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
snake_case_ , snake_case_ : str = 0, 0
snake_case_ : int = n - i
snake_case_ : Any = memo.get(__UpperCamelCase )
if sub_memo is not None:
snake_case_ : Any = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
snake_case_ : Optional[int] = -1
for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
snake_case_ : Union[str, Any] = _k
break
if max_jump >= 0:
snake_case_ , snake_case_ , snake_case_ : str = jumps[max_jump]
# since the difference between jumps is cached, add c
snake_case_ : List[str] = diff + c
for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
snake_case_ , snake_case_ : List[str] = divmod(__UpperCamelCase , 1_0 )
if new_c > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
snake_case_ : Tuple = []
else:
snake_case_ : List[Any] = {c: []}
snake_case_ : Tuple = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
snake_case_ , snake_case_ : Tuple = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
snake_case_ , snake_case_ : List[str] = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
snake_case_ : Optional[int] = sub_memo[c]
# keep jumps sorted by # of terms skipped
snake_case_ : Union[str, Any] = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value of digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Any ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
snake_case_ : str = i
snake_case_ , snake_case_ , snake_case_ : List[str] = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
snake_case_ : Union[str, Any] = ds_c + ds_b
diff += addend
snake_case_ : Tuple = 0
for j in range(__UpperCamelCase ):
snake_case_ : Any = a_i[j] + addend
snake_case_ , snake_case_ : Optional[int] = divmod(__UpperCamelCase , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return diff, i - start_i
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
snake_case_ : Tuple = digits[j] + addend
if s >= 1_0:
snake_case_ , snake_case_ : Optional[Any] = divmod(__UpperCamelCase , 1_0 )
snake_case_ : Dict = addend // 1_0 + quotient
else:
snake_case_ : Tuple = s
snake_case_ : Union[str, Any] = addend // 1_0
if addend == 0:
break
while addend > 0:
snake_case_ , snake_case_ : Dict = divmod(__UpperCamelCase , 1_0 )
digits.append(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : int = 1_0**1_5 ):
'''simple docstring'''
snake_case_ : List[str] = [1]
snake_case_ : Any = 1
snake_case_ : str = 0
while True:
snake_case_ , snake_case_ : int = next_term(__UpperCamelCase , 2_0 , i + dn , __UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
snake_case_ : Tuple = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
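# --- illustrative sketch (not part of the original file) -------------------
# Brute-force reference for the sequence the memoised jumps above accelerate:
# a(1) = 1 and a(n+1) = a(n) + digitsum(a(n)). Useful for cross-checking
# small n against next_term/compute.
def brute_force_a_n(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a
# 1 -> 2 -> 4 -> 8 -> 16 -> 23
assert brute_force_a_n(6) == 23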
| 21
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
snake_case_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id )
snake_case_ : Tuple = model(_lowercase , decoder_input_ids=_lowercase ).logits
snake_case_ : Tuple = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean()
snake_case_ : List[str] = -(labels.shape[-1] * loss.item())
snake_case_ : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
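# --- illustrative sketch (not part of the original file) -------------------
# What optax.softmax_cross_entropy(logits, onehot(labels, V)).mean() in the
# test above computes, spelled out with plain NumPy for a tiny batch.
import numpy as np
def softmax_xent_mean(logits: np.ndarray, labels: np.ndarray) -> float:
    # logits: (batch, seq, vocab); labels: (batch, seq) integer token ids
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    nll = -np.take_along_axis(log_probs, labels[..., None], axis=-1)
    return float(nll.mean())
print(softmax_xent_mean(np.zeros((1, 2, 4)), np.array([[1, 3]])))  # ln(4) ~ 1.3863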
| 21
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowercase__ ( A_ ):
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase : Dict = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """tf_padding"""))
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """depth_multiplier"""))
class lowercase__ :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=0.25 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="relu6" , SCREAMING_SNAKE_CASE=1280 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=None , ) -> Any:
_lowerCamelCase : Optional[Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : Tuple = depth_multiplier
_lowerCamelCase : int = depth_divisible_by
_lowerCamelCase : Tuple = min_depth
_lowerCamelCase : Any = expand_ratio
_lowerCamelCase : int = tf_padding
_lowerCamelCase : str = output_stride
_lowerCamelCase : str = first_layer_is_expansion
_lowerCamelCase : Optional[int] = finegrained_output
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Optional[int] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
_lowerCamelCase : Any = classifier_dropout_prob
_lowerCamelCase : Any = use_labels
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = num_labels
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[str] = scope
def UpperCamelCase_ ( self) -> Union[str, Any]:
_lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCamelCase : List[Any] = None
_lowerCamelCase : str = None
if self.use_labels:
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels)
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self) -> str:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str:
_lowerCamelCase : Tuple = MobileNetVaModel(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : str = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Tuple:
_lowerCamelCase : Tuple = self.num_labels
_lowerCamelCase : Union[str, Any] = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int:
_lowerCamelCase : Tuple = self.num_labels
_lowerCamelCase : List[str] = MobileNetVaForSemanticSegmentation(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : int = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = config_and_inputs
_lowerCamelCase : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( A_ ,A_ ,unittest.TestCase ):
__UpperCAmelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : int = MobileNetVaModelTester(self)
_lowerCamelCase : Dict = MobileNetVaConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""")
def UpperCamelCase_ ( self) -> Optional[int]:
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""")
def UpperCamelCase_ ( self) -> Dict:
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""")
def UpperCamelCase_ ( self) -> Any:
pass
def UpperCamelCase_ ( self) -> List[Any]:
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> int:
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[Any]:
def check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
_lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
_lowerCamelCase : Optional[Any] = outputs.hidden_states
_lowerCamelCase : Tuple = 16
self.assertEqual(len(SCREAMING_SNAKE_CASE) , SCREAMING_SNAKE_CASE)
_lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Tuple:
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE)
@slow
def UpperCamelCase_ ( self) -> Tuple:
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : str = MobileNetVaModel.from_pretrained(SCREAMING_SNAKE_CASE)
self.assertIsNotNone(SCREAMING_SNAKE_CASE)
def _snake_case ( ):
"""simple docstring"""
_lowerCamelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self) -> Any:
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""") if is_vision_available() else None
)
@slow
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : List[str] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""").to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
_lowerCamelCase : str = model(**SCREAMING_SNAKE_CASE)
# verify the logits
_lowerCamelCase : Optional[int] = torch.Size((1, 1001))
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Tuple = torch.tensor([0.24_45, -1.19_93, 0.19_05]).to(SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4))
@slow
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : Optional[int] = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""")
_lowerCamelCase : str = model.to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Dict = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""")
_lowerCamelCase : Union[str, Any] = prepare_img()
_lowerCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = outputs.logits
# verify the logits
_lowerCamelCase : Union[str, Any] = torch.Size((1, 21, 65, 65))
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = torch.tensor(
[
[[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
[[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
[[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
] , device=SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4))
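# --- illustrative sketch (not part of the original file) -------------------
# Why the segmentation logits above come out as (1, 21, 65, 65): with
# TF-style "same" padding and output_stride 8, a 513x513 input maps to a
# ceil(513 / 8) grid; the stride-32 classification backbone maps 224 to 7.
import math
assert math.ceil(513 / 8) == 65
assert math.ceil(224 / 32) == 7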
| 88
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase_ : str = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='timesformer'
def __init__( self : Optional[int] , __a : Optional[int]=2_24 , __a : Tuple=16 , __a : int=3 , __a : Union[str, Any]=8 , __a : Union[str, Any]=7_68 , __a : List[str]=12 , __a : Union[str, Any]=12 , __a : Optional[Any]=30_72 , __a : Tuple="gelu" , __a : str=0.0 , __a : List[Any]=0.0 , __a : Any=0.02 , __a : List[str]=1e-6 , __a : Any=True , __a : Union[str, Any]="divided_space_time" , __a : str=0 , **__a : Tuple , ):
super().__init__(**__a )
_a = image_size
_a = patch_size
_a = num_channels
_a = num_frames
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = initializer_range
_a = layer_norm_eps
_a = qkv_bias
_a = attention_type
_a = drop_path_rate
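# --- illustrative sketch (not part of the original file) -------------------
# Instantiating the config above with its defaults. TimesformerConfig is the
# de-obfuscated name assumed for this class, consistent with the
# model_type "timesformer" declared above.
from transformers import TimesformerConfig
config = TimesformerConfig(num_frames=8, attention_type="divided_space_time")
assert config.patch_size == 16 and config.hidden_size == 768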
| 692
| 0
|
"""simple docstring"""
a = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
a = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
a = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int ) -> str:
'''simple docstring'''
assert len(str(_snake_case ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 and 12"
assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
_A = year // 1_00
_A = (5 * (century % 4) + 2) % 7
_A = year % 1_00
_A = centurian % 12
_A = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_A = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_A = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
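# --- illustrative sketch (not part of the original file) -------------------
# Any Doomsday result can be cross-checked against the standard library;
# e.g. the algorithm above should agree that 2012-05-12 was a Saturday.
import datetime
assert datetime.date(2012, 5, 12).strftime("%A") == "Saturday"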
| 505
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _snake_case ( _snake_case : List[Any] ) -> Any:
'''simple docstring'''
_A = {}
_A = tokenizer(example['content'] , truncation=_snake_case )['input_ids']
_A = len(example['content'] ) / len(output['input_ids'] )
return output
a = HfArgumentParser(PretokenizationArguments)
a = parser.parse_args()
if args.num_workers is None:
a = multiprocessing.cpu_count()
a = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a = time.time()
a = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
a = time.time()
a = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
a = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
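# --- illustrative sketch (not part of the original file) -------------------
# The ratio computed inside the tokenize() map above is characters per token;
# a quick standalone check (the "gpt2" tokenizer here is just an example).
from transformers import AutoTokenizer
tok = AutoTokenizer.from_pretrained("gpt2")
text = "def add(a, b):\n    return a + b"
ids = tok(text, truncation=False)["input_ids"]
print(len(text) / len(ids))  # chars per token; higher means denser text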
| 505
| 1
|
'''simple docstring'''
from collections.abc import Sequence
def _a (lowercase__ : Sequence[int] | None = None ) -> int:
"""simple docstring"""
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
__snake_case = nums[0]
for i in range(1 , len(lowercase__ ) ):
__snake_case = nums[i]
__snake_case = max(lowercase__ , ans + num , lowercase__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_a : int = int(input("Enter number of elements : ").strip())
_a : int = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
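# --- illustrative sketch (not part of the original file) -------------------
# Kadane's recurrence behind the loop above, as a de-obfuscated standalone
# copy with two quick checks (function name kadane is our own).
def kadane(nums):
    ans = best = nums[0]
    for num in nums[1:]:
        ans = max(num, ans + num)
        best = max(best, ans)
    return best
assert kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # subarray [4, -1, 2, 1]
assert kadane([-3, -1, -2]) == -1  # all-negative input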
| 56
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ = KandinskyVaaPipeline
lowercase_ = [
"""image_embeds""",
"""negative_image_embeds""",
]
lowercase_ = ["""image_embeds""", """negative_image_embeds"""]
lowercase_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowercase_ = False
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return 3_2
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return 3_2
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return 1_0_0
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A ={
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__A =UNetaDConditionModel(**lowercase__ )
return model
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A =VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.dummy_unet
__A =self.dummy_movq
__A =DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowercase__ , )
__A ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCamelCase ( self , lowercase__ , lowercase__=0 ):
'''simple docstring'''
__A =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
__A =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase__ )
if str(lowercase__ ).startswith('''mps''' ):
__A =torch.manual_seed(lowercase__ )
else:
__A =torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__A ={
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __UpperCamelCase ( self ):
'''simple docstring'''
__A ='''cpu'''
__A =self.get_dummy_components()
__A =self.pipeline_class(**lowercase__ )
__A =pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__A =pipe(**self.get_dummy_inputs(lowercase__ ) )
__A =output.images
__A =pipe(
**self.get_dummy_inputs(lowercase__ ) , return_dict=lowercase__ , )[0]
__A =image[0, -3:, -3:, -1]
__A =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__A =np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
__A =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(lowercase__ )
__A =KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
__A =pipeline.to(lowercase__ )
pipeline.set_progress_bar_config(disable=lowercase__ )
__A ='''red cat, 4k photo'''
__A =torch.Generator(device='''cuda''' ).manual_seed(0 )
__A , __A =pipe_prior(
lowercase__ , generator=lowercase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__A =torch.Generator(device='''cuda''' ).manual_seed(0 )
__A =pipeline(
image_embeds=lowercase__ , negative_image_embeds=lowercase__ , generator=lowercase__ , num_inference_steps=1_0_0 , output_type='''np''' , )
__A =output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
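# --- illustrative sketch (not part of the original file) -------------------
# assert_mean_pixel_difference compares two images by their mean absolute
# pixel gap; a minimal NumPy equivalent (the threshold 10 is an assumption
# about the helper's default, not taken from this file).
import numpy as np
def mean_pixel_difference(a: np.ndarray, b: np.ndarray) -> float:
    return float(np.abs(a.astype(np.float32) - b.astype(np.float32)).mean())
a = np.full((4, 4, 3), 100, dtype=np.uint8)
b = np.full((4, 4, 3), 103, dtype=np.uint8)
assert mean_pixel_difference(a, b) < 10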
| 184
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase : Tuple = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Dict = ["ConvNextFeatureExtractor"]
__UpperCAmelCase : Dict = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Dict = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[str] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__UpperCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
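# --- illustrative sketch (not part of the original file) -------------------
# The _LazyModule pattern above defers submodule imports until first
# attribute access; a toy equivalent built on importlib (all names in this
# sketch are examples, not transformers API).
import importlib
class LazyAttr:
    def __init__(self, module_name: str, attr: str):
        self.module_name, self.attr = module_name, attr
    def load(self):
        # the real import happens only when load() is called
        return getattr(importlib.import_module(self.module_name), self.attr)
dumps = LazyAttr("json", "dumps").load()
assert dumps({"a": 1}) == '{"a": 1}'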
| 707
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]:
# load base model
__snake_case: str = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa)
# load LoRA weight from .safetensors
__snake_case: Dict = load_file(SCREAMING_SNAKE_CASE__)
__snake_case: List[str] = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key; it usually looks something like
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# alpha has already been applied, so just skip those entries
if ".alpha" in key or key in visited:
continue
if "text" in key:
__snake_case: Optional[int] = key.split(""".""")[0].split(LORA_PREFIX_TEXT_ENCODER + """_""")[-1].split("""_""")
__snake_case: Union[str, Any] = pipeline.text_encoder
else:
__snake_case: Optional[int] = key.split(""".""")[0].split(LORA_PREFIX_UNET + """_""")[-1].split("""_""")
__snake_case: List[Any] = pipeline.unet
# find the target layer
__snake_case: Optional[Any] = layer_infos.pop(0)
while len(SCREAMING_SNAKE_CASE__) > -1:
try:
__snake_case: Optional[Any] = curr_layer.__getattr__(SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__) > 0:
__snake_case: Optional[int] = layer_infos.pop(0)
elif len(SCREAMING_SNAKE_CASE__) == 0:
break
except Exception:
if len(SCREAMING_SNAKE_CASE__) > 0:
temp_name += "_" + layer_infos.pop(0)
else:
__snake_case: Tuple = layer_infos.pop(0)
__snake_case: Any = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up"""))
pair_keys.append(SCREAMING_SNAKE_CASE__)
else:
pair_keys.append(SCREAMING_SNAKE_CASE__)
pair_keys.append(key.replace("""lora_up""" , """lora_down"""))
# update weight
if len(state_dict[pair_keys[0]].shape) == 4:
__snake_case: Union[str, Any] = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.floataa)
__snake_case: Dict = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.floataa)
curr_layer.weight.data += alpha * torch.mm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__).unsqueeze(2).unsqueeze(3)
else:
__snake_case: List[Any] = state_dict[pair_keys[0]].to(torch.floataa)
__snake_case: Dict = state_dict[pair_keys[1]].to(torch.floataa)
curr_layer.weight.data += alpha * torch.mm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# update visited list
for item in pair_keys:
visited.append(SCREAMING_SNAKE_CASE__)
return pipeline
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
__UpperCAmelCase : str = parser.parse_args()
__UpperCAmelCase : Union[str, Any] = args.base_model_path
__UpperCAmelCase : str = args.checkpoint_path
__UpperCAmelCase : List[str] = args.dump_path
__UpperCAmelCase : Optional[int] = args.lora_prefix_unet
__UpperCAmelCase : Optional[int] = args.lora_prefix_text_encoder
__UpperCAmelCase : int = args.alpha
__UpperCAmelCase : List[Any] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
__UpperCAmelCase : Union[str, Any] = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
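# --- illustrative sketch (not part of the original file) -------------------
# The core update in the conversion above is W <- W + alpha * (up @ down);
# a tiny torch check of the linear (non-conv) branch with the default
# alpha = 0.75.
import torch
w = torch.zeros(4, 4)
up, down = torch.randn(4, 2), torch.randn(2, 4)
w += 0.75 * torch.mm(up, down)
assert w.shape == (4, 4)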
| 155
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class UpperCamelCase__ :
'''simple docstring'''
_UpperCamelCase = PegasusConfig
_UpperCamelCase = {}
_UpperCamelCase = 'gelu'
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=7 ,_lowerCAmelCase=True ,_lowerCAmelCase=False ,_lowerCAmelCase=99 ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=40 ,_lowerCAmelCase=2 ,_lowerCAmelCase=1 ,_lowerCAmelCase=0 ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = eos_token_id
lowerCamelCase__ = pad_token_id
lowerCamelCase__ = bos_token_id
def UpperCamelCase_ ( self ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
lowerCamelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
lowerCamelCase__ = tf.concat([input_ids, eos_tensor] ,axis=1 )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCamelCase__ = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
lowerCamelCase__ = prepare_pegasus_inputs_dict(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
return config, inputs_dict
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFPegasusModel(config=_lowerCAmelCase ).get_decoder()
lowerCamelCase__ = inputs_dict["""input_ids"""]
lowerCamelCase__ = input_ids[:1, :]
lowerCamelCase__ = inputs_dict["""attention_mask"""][:1, :]
lowerCamelCase__ = inputs_dict["""head_mask"""]
lowerCamelCase__ = 1
# first forward pass
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,head_mask=_lowerCAmelCase ,use_cache=_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase__ = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowerCamelCase__ = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and attention_mask
lowerCamelCase__ = tf.concat([input_ids, next_tokens] ,axis=-1 )
lowerCamelCase__ = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase )[0]
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,past_key_values=_lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
lowerCamelCase__ = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
lowerCamelCase__ = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCAmelCase ,_lowerCAmelCase ,rtol=1E-3 )
def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : str=None , ):
if attention_mask is None:
lowerCamelCase__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCamelCase__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCamelCase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase__ (a ,a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
_UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
_UpperCamelCase = (
{
'conversational': TFPegasusForConditionalGeneration,
'feature-extraction': TFPegasusModel,
'summarization': TFPegasusForConditionalGeneration,
'text2text-generation': TFPegasusForConditionalGeneration,
'translation': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFPegasusModelTester(self )
lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
_UpperCamelCase = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
_UpperCamelCase = 'google/pegasus-xsum'
@cached_property
def UpperCamelCase_ ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
lowerCamelCase__ = self.translate_src_text(**_lowerCAmelCase )
assert self.expected_text == generated_words
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
lowerCamelCase__ = self.tokenizer(self.src_text ,**_lowerCAmelCase ,padding=_lowerCAmelCase ,return_tensors="""tf""" )
lowerCamelCase__ = self.model.generate(
model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 ,use_cache=_lowerCAmelCase ,)
lowerCamelCase__ = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_lowerCAmelCase )
return generated_words
@slow
def UpperCamelCase_ ( self ):
self._assert_generated_batch_equal_expected()
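# --- illustrative sketch (not part of the original file) -------------------
# The attention-mask rule used by prepare_pegasus_inputs_dict above: 1 where
# the token is not padding (the tester's config uses pad_token_id = 1).
import tensorflow as tf
ids = tf.constant([[5, 6, 1, 1]])
mask = tf.cast(tf.math.not_equal(ids, 1), tf.int8)
assert mask.numpy().tolist() == [[1, 1, 0, 0]]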
| 50
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 678
| 0
|
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
A = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=768 ):
super().__init__(_UpperCAmelCase )
__a : str = proj_size
__a : Optional[Any] = CLIPVisionModel(_UpperCAmelCase )
__a : List[Any] = PaintByExampleMapper(_UpperCAmelCase )
__a : int = nn.LayerNorm(config.hidden_size )
__a : List[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
__a : int = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
__a : str = self.model(pixel_values=_UpperCAmelCase )
__a : Union[str, Any] = clip_output.pooler_output
__a : Optional[int] = self.mapper(latent_states[:, None] )
__a : int = self.final_layer_norm(_UpperCAmelCase )
__a : Optional[Any] = self.proj_out(_UpperCAmelCase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
super().__init__()
__a : List[str] = (config.num_hidden_layers + 1) // 5
__a : Optional[Any] = config.hidden_size
__a : str = 1
__a : Union[str, Any] = nn.ModuleList(
[
BasicTransformerBlock(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , activation_fn='''gelu''' , attention_bias=_UpperCAmelCase )
for _ in range(_UpperCAmelCase )
] )
def _lowerCamelCase ( self , _UpperCAmelCase ):
for block in self.blocks:
__a : Union[str, Any] = block(_UpperCAmelCase )
return hidden_states
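# --- illustrative sketch (not part of the original file) -------------------
# The mapper above stacks (num_hidden_layers + 1) // 5 transformer blocks;
# for a 24-layer vision tower (e.g. CLIP ViT-L/14, which Paint-by-Example
# is commonly paired with) that works out to 5 blocks.
assert (24 + 1) // 5 == 5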
| 712
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __A ( a_ :Tuple) -> Dict:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(a_)
def __A ( a_ :Any) -> int:
from transformers.testing_utils import pytest_terminal_summary_main
__a : str = terminalreporter.config.getoption('''--make-reports''')
if make_reports:
pytest_terminal_summary_main(a_ , id=a_)
| 101
| 0
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ : List[str] = logging.get_logger(__name__)
class snake_case_ ( __A ):
'''simple docstring'''
lowerCamelCase = ["pixel_values"]
def __init__( self : Dict , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 255 , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , __magic_name__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **__magic_name__ : Optional[Any] , ) -> None:
super().__init__(**__magic_name__ )
lowerCamelCase_ : List[str] = size if size is not None else {"shortest_edge": 224}
lowerCamelCase_ : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase_ : str = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCamelCase_ : List[Any] = get_size_dict(__magic_name__ , param_name="crop_size" )
lowerCamelCase_ : Optional[Any] = do_resize
lowerCamelCase_ : str = size
lowerCamelCase_ : Optional[Any] = resample
lowerCamelCase_ : Optional[Any] = do_center_crop
lowerCamelCase_ : Tuple = crop_size
lowerCamelCase_ : Dict = do_rescale
lowerCamelCase_ : List[Any] = rescale_factor
lowerCamelCase_ : Dict = do_normalize
lowerCamelCase_ : Optional[int] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase_ : Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : int , ) -> np.ndarray:
lowerCamelCase_ : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowerCamelCase_ : List[Any] = int((256 / 224) * size["shortest_edge"] )
lowerCamelCase_ : List[str] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase_ : List[Any] = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
__magic_name__ , size=(size_dict["height"], size_dict["width"]) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] , ) -> np.ndarray:
lowerCamelCase_ : List[str] = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)  # flag value assumed; obfuscated in the source
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
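

# Illustrative sketch (added; not part of the processor above): the rescale and
# normalize steps reduce to simple array arithmetic. The ImageNet statistics below
# are an assumption for demonstration only.
if __name__ == "__main__":
    _mean = np.array([0.485, 0.456, 0.406])
    _std = np.array([0.229, 0.224, 0.225])
    _image = np.zeros((224, 224, 3), dtype=np.uint8)  # dummy RGB image, channels last
    _scaled = _image.astype(np.float32) * (1 / 255)  # rescale step
    _normalized = (_scaled - _mean) / _std  # normalize step
    assert _normalized.shape == (224, 224, 3)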
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np

from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
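

# Usage sketch (illustrative, not from the original file): the defaults above give a
# BERT-base-sized text tower with a 512-dimensional visual embedding space.
if __name__ == "__main__":
    config = VisualBertConfig()
    assert config.hidden_size == 768 and config.visual_embedding_dim == 512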
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal value to its binary equivalent, returned as an int made of
    the binary digits.

    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("9A4")
    100110100100
    >>> hex_to_bin("   12f   ")
    100101111
    >>> hex_to_bin("FfFf")
    1111111111111111
    >>> hex_to_bin("-fFfF")
    -1111111111111111
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
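    # Sanity-check sketch (added for illustration): the stdlib int/bin round trip
    # agrees with the digit sequence returned by hex_to_bin.
    assert bin(int("AC", 16)) == "0b10101100"
    assert hex_to_bin("AC") == 10101100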
"""
Feature extractor class for MarkupLM.
"""

import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
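

# Usage sketch (illustrative; assumes `bs4` is installed). A single HTML string yields
# one batch entry of text nodes and their xpaths:
if __name__ == "__main__":
    extractor = MarkupLMFeatureExtractor()
    encoding = extractor("<html><body><p>Hello</p></body></html>")
    assert encoding["nodes"] == [["Hello"]]
    assert encoding["xpaths"] == [["/html/body/p"]]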
"""
Convert BLIP-2 checkpoints from the original repository.
"""

import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
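
# Usage sketch (illustrative; the script file name and output path are assumptions):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted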
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : Union[str, Any] = """gpt_neo"""
__snake_case : Optional[int] = ["""past_key_values"""]
__snake_case : Optional[int] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self :Any , __lowercase :Optional[Any]=5_0257 , __lowercase :Tuple=2048 , __lowercase :Optional[Any]=2048 , __lowercase :Any=24 , __lowercase :Optional[Any]=[[["global", "local"], 12]] , __lowercase :List[Any]=16 , __lowercase :Optional[int]=None , __lowercase :List[str]=256 , __lowercase :List[str]="gelu_new" , __lowercase :Union[str, Any]=0.0 , __lowercase :str=0.0 , __lowercase :int=0.0 , __lowercase :List[str]=0.1 , __lowercase :Union[str, Any]=1e-5 , __lowercase :List[Any]=0.02 , __lowercase :Dict=True , __lowercase :str=5_0256 , __lowercase :str=5_0256 , **__lowercase :Any , ):
__lowerCamelCase : Dict =vocab_size
__lowerCamelCase : Tuple =max_position_embeddings
__lowerCamelCase : Dict =hidden_size
__lowerCamelCase : Optional[Any] =num_layers
__lowerCamelCase : str =num_heads
__lowerCamelCase : Dict =intermediate_size
__lowerCamelCase : List[Any] =window_size
__lowerCamelCase : int =activation_function
__lowerCamelCase : Any =resid_dropout
__lowerCamelCase : str =embed_dropout
__lowerCamelCase : Optional[Any] =attention_dropout
__lowerCamelCase : Optional[Any] =classifier_dropout
__lowerCamelCase : Optional[Any] =layer_norm_epsilon
__lowerCamelCase : Union[str, Any] =initializer_range
__lowerCamelCase : Dict =use_cache
__lowerCamelCase : List[Any] =bos_token_id
__lowerCamelCase : Any =eos_token_id
__lowerCamelCase : str =attention_types
__lowerCamelCase : int =self.expand_attention_types_params(__lowercase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
f'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
f'`config.num_layers = {self.num_layers}`. '
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
@staticmethod
def __lowercase ( __lowercase :Optional[int] ):
__lowerCamelCase : List[Any] =[]
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
import torch
__lowerCamelCase : Optional[Any] =input.size()
__lowerCamelCase : Tuple =len(SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[Any] =shape[dimension]
__lowerCamelCase : Optional[Any] =torch.arange(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCamelCase : Optional[int] =torch.div(sizedim - size , SCREAMING_SNAKE_CASE , rounding_mode='''floor''' ) + 1
__lowerCamelCase : Tuple =torch.arange(SCREAMING_SNAKE_CASE ) + low_indices[:min_length][:, None]
__lowerCamelCase : int =[slice(SCREAMING_SNAKE_CASE )] * rank
__lowerCamelCase : List[Any] =indices
__lowerCamelCase : Dict =input[s]
__lowerCamelCase : str =list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(SCREAMING_SNAKE_CASE )
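

# Equivalence sketch (illustrative): custom_unfold reproduces torch.Tensor.unfold with
# ONNX-exportable ops, e.g.
#
#   x = torch.arange(10).view(1, 10)
#   torch.equal(custom_unfold(x, dimension=1, size=4, step=2), x.unfold(1, 4, 2))  # True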
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
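

# Usage sketch (illustrative): the default attention pattern [[["global", "local"], 12]]
# expands to 24 alternating layer types, matching the default num_layers.
if __name__ == "__main__":
    layers = GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
    assert len(layers) == 24
    assert layers[:4] == ["global", "local", "global", "local"]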
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_UpperCamelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_UpperCamelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_UpperCamelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
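

# Usage sketch (illustrative): the metric wraps nltk's corpus_gleu directly, so the same
# score can be computed without the datasets wrapper. A perfect match scores 1.0:
if __name__ == "__main__":
    hypothesis = ["the", "cat", "sat"]
    references_for_hypothesis = [["the", "cat", "sat"]]
    score = gleu_score.corpus_gleu(
        list_of_references=[references_for_hypothesis], hypotheses=[hypothesis]
    )
    assert score == 1.0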
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
__A = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 1_8, 2]
__A = True if 'large' in model_name or 'huge' in model_name else False
__A = True if 'large' in model_name or 'huge' in model_name else False
__A = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__A = [3, 3, 3, 3]
__A = [5, 5, 5, 5]
elif "fl4" in model_name:
__A = [4, 4, 4, 4]
__A = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__A = [3, 3, 3, 3]
if "lrf" in model_name:
__A = [3, 3, 3, 3]
else:
__A = [2, 2, 2, 2]
if "tiny" in model_name:
__A = 9_6
elif "small" in model_name:
__A = 9_6
elif "base" in model_name:
__A = 1_2_8
elif "large" in model_name:
__A = 1_9_2
elif "xlarge" in model_name:
__A = 2_5_6
elif "huge" in model_name:
__A = 3_5_2
# set label information
__A = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__A = 'imagenet-22k-id2label.json'
else:
__A = 'imagenet-1k-id2label.json'
__A = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="dataset" ) , "r" ) )
__A = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
__A = {v: k for k, v in idalabel.items()}
__A = FocalNetConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , focal_levels=lowerCamelCase_ , focal_windows=lowerCamelCase_ , use_conv_embed=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , use_post_layernorm=lowerCamelCase_ , use_layerscale=lowerCamelCase_ , )
return config
def UpperCAmelCase ( a_ ) -> Any:
"""simple docstring"""
if "patch_embed.proj" in name:
__A = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__A = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
__A = 'encoder.' + name
if "encoder.layers" in name:
__A = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
__A = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
__A = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__A = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__A = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__A = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
__A = 'layernorm.weight'
if name == "norm.bias":
__A = 'layernorm.bias'
if "head" in name:
__A = name.replace("head" , "classifier" )
else:
__A = 'focalnet.' + name
return name
def UpperCAmelCase ( a_ , a_ , a_=False ) -> str:
"""simple docstring"""
__A = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__A = model_name_to_url[model_name]
print("Checkpoint URL: " , lowerCamelCase_ )
__A = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="cpu" )['model']
# rename keys
for key in state_dict.copy().keys():
__A = state_dict.pop(lowerCamelCase_ )
__A = val
__A = get_focalnet_config(lowerCamelCase_ )
__A = FocalNetForImageClassification(lowerCamelCase_ )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase_ )
# verify conversion
__A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__A = BitImageProcessor(
do_resize=lowerCamelCase_ , size={"shortest_edge": 2_5_6} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase_ , crop_size=2_2_4 , do_normalize=lowerCamelCase_ , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ , )
__A = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
__A = processor(images=lowerCamelCase_ , return_tensors="pt" )
__A = transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__A = image_transforms(lowerCamelCase_ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase_ , atol=1E-4 )
__A = model(**lowerCamelCase_ )
__A = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__A = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
__A = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
__A = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
__A = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
__A = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
__A = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
SCREAMING_SNAKE_CASE :Optional[Any] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
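
# Example invocation (the script filename and output path are illustrative):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted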
| 55
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
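
# Worked example of the rules above: "focalnet-tiny-lrf" yields embed_dim=96,
# depths=[2, 2, 6, 2], focal_levels=[3, 3, 3, 3], focal_windows=[3, 3, 3, 3],
# ImageNet-1k labels, and use_conv_embed/use_post_layernorm/use_layerscale all False.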
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy the original FocalNet checkpoint's weights into our FocalNet structure and verify the conversion."""
    # fmt: off
    model_name_to_url = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89
| 0
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
def UpperCAmelCase__ ( self : List[Any] , **_lowerCamelCase : str ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A_ )
def UpperCAmelCase__ ( self : List[str] , **_lowerCamelCase : Dict ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def UpperCAmelCase__ ( self : Optional[int] , _lowerCamelCase : str ):
snake_case__ : Any = 'lower newer'
snake_case__ : Optional[Any] = 'lower newer'
return input_text, output_text
def UpperCAmelCase__ ( self : Union[str, Any] ):
snake_case__ : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = 'lower newer'
snake_case__ : List[Any] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
snake_case__ : Union[str, Any] = tokenizer.tokenize(A_ ) # , add_prefix_space=True)
self.assertListEqual(A_ , A_ )
snake_case__ : int = tokens + [tokenizer.unk_token]
snake_case__ : Tuple = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
snake_case__ : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=A_ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=A_ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def UpperCAmelCase__ ( self : int ):
snake_case__ : Tuple = self.tokenizer_class.from_pretrained('roberta-base' )
snake_case__ : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=A_ )
snake_case__ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ )
snake_case__ : Union[str, Any] = tokenizer.encode(
'sequence builders' , add_special_tokens=A_ , add_prefix_space=A_ )
snake_case__ : Union[str, Any] = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=A_ , add_prefix_space=A_ )
snake_case__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A_ )
snake_case__ : List[Any] = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCAmelCase__ ( self : Any ):
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : List[Any] = 'Encode this sequence.'
snake_case__ : List[Any] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
snake_case__ : Tuple = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
snake_case__ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A_ , A_ )
snake_case__ : Optional[Any] = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A_ , A_ )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
snake_case__ : Tuple = tokenizer.encode(A_ , add_special_tokens=A_ )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A_ , A_ )
# Testing spaces after special tokens
snake_case__ : int = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A_ , lstrip=A_ , rstrip=A_ )} ) # mask token has a left space
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(A_ )
snake_case__ : Optional[Any] = 'Encode <mask> sequence'
snake_case__ : Optional[Any] = 'Encode <mask>sequence'
snake_case__ : Dict = tokenizer.encode(A_ )
snake_case__ : Tuple = encoded.index(A_ )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A_ , A_ )
snake_case__ : Optional[int] = tokenizer.encode(A_ )
snake_case__ : Tuple = encoded.index(A_ )
snake_case__ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A_ , A_ )
def UpperCAmelCase__ ( self : List[str] ):
pass
def UpperCAmelCase__ ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
snake_case__ : Union[str, Any] = self.tokenizer_class.from_pretrained(A_ , **A_ )
snake_case__ : List[str] = 'A, <mask> AllenNLP sentence.'
snake_case__ : Optional[int] = tokenizer_r.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_ )
snake_case__ : List[Any] = tokenizer_p.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
snake_case__ : int = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
snake_case__ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
A_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
snake_case__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : int = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , A_ )
self.assertEqual(post_processor_state['add_prefix_space'] , A_ )
self.assertEqual(post_processor_state['trim_offsets'] , A_ )
def UpperCAmelCase__ ( self : int ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case__ : Union[str, Any] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : Optional[int] = F'''{text_of_1_token} {text_of_1_token}'''
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
snake_case__ : List[Any] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A_ ) + 1, len(A_ ) + 1 + len(A_ )) , )
snake_case__ : List[str] = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
snake_case__ : Union[str, Any] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A_ ) + 1, len(A_ ) + 1 + len(A_ )) , )
snake_case__ : int = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
snake_case__ : Any = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A_ ), len(A_ ) + 1 + len(A_ )) , )
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
snake_case__ : List[Any] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A_ ), len(A_ ) + 1 + len(A_ )) , )
snake_case__ : Optional[Any] = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
snake_case__ : Optional[int] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A_ ) + 1, 1 + len(A_ ) + 1 + len(A_ )) , )
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
snake_case__ : Optional[int] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A_ ), 1 + len(A_ ) + 1 + len(A_ )) , )
snake_case__ : int = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
snake_case__ : List[Any] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A_ ), 1 + len(A_ ) + 1 + len(A_ )) , )
| 703
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTest(unittest.TestCase):
def UpperCAmelCase__ ( self : str ):
# A mock response for an HTTP head request to emulate server down
snake_case__ : int = mock.Mock()
snake_case__ : Any = 5_0_0
snake_case__ : Dict = {}
snake_case__ : List[str] = HTTPError
snake_case__ : str = {}
# Download this model to make sure it's in the cache.
snake_case__ : List[str] = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=_lowerCamelCase ) as mock_head:
snake_case__ : Dict = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : Optional[Any] ):
# This test is for deprecated behavior and can be removed in v5
snake_case__ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : str ):
snake_case__ : Tuple = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : int ):
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def UpperCAmelCase__ ( self : int ):
snake_case__ : Dict = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
snake_case__ : Any = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCamelCase , repo_id='test-feature-extractor' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
snake_case__ : Any = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def UpperCAmelCase__ ( self : List[str] ):
snake_case__ : str = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
snake_case__ : Dict = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCamelCase , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
snake_case__ : str = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def UpperCAmelCase__ ( self : List[Any] ):
CustomFeatureExtractor.register_for_auto_class()
snake_case__ : Optional[int] = CustomFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
snake_case__ : int = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 303
| 0
|
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef

DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
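
# A quick sketch of the metrics above on toy arrays (shown as comments so the module
# stays side-effect free on import; the values are easy to verify by hand):
#   import numpy as np
#   preds, labels = np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1])
#   simple_accuracy(preds, labels)   # -> 0.75
#   acc_and_f1(preds, labels)["f1"]  # precision 2/3, recall 1.0 -> f1 = 0.8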
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
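
# Typical entry point for a GLUE task (illustrative; "mrpc" routes to acc_and_f1 above):
#   glue_compute_metrics("mrpc", preds, labels)
#   -> {"acc": ..., "f1": ..., "acc_and_f1": ...}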
| 466
|
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
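
# Worked example on a hypothetical 3-vertex graph with edges (0,1)=1, (1,2)=2, (0,2)=3.
# Prim's algorithm keeps the two lightest edges, so the MST weight is 3:
#   g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#   sum(g.prims_algorithm().edges.values())  # -> 3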
def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 661
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
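
# With the lazy structure above, importing the package is cheap; the heavy torch
# modules load only on first attribute access (illustrative):
#   from transformers.models.autoformer import AutoformerConfig  # resolved lazily via _LazyModule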
| 146
|
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Given any two of resistance, reactance and impedance (pass the unknown one as 0),
    compute the third from the relation impedance**2 = resistance**2 + reactance**2.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
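
# Worked example (the classic 3-4-5 triple): with resistance = 3 and reactance = 4,
# impedance = sqrt(3**2 + 4**2) = 5, so:
#   electrical_impedance(3, 4, 0)  # -> {"impedance": 5.0}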
| 146
| 1
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
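
# Shape walkthrough (illustrative, batch size B): image_embeddings [B, clip_embeddings_dim]
# -> clip_extra_context_tokens_proj -> [B, n_tokens * cross_attention_dim]
# -> reshape + permute -> [B, n_tokens, cross_attention_dim], which is prepended along the
# sequence dimension to the projected text encoder hidden states.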
| 315
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
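
# Example usage (a sketch): the defaults above mirror the base checkpoint, and any
# field can be overridden at construction time:
#   config = LukeConfig(entity_emb_size=128)
#   config.hidden_size  # -> 768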
| 315
| 1
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
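
# Usage sketch outside the tests (names are illustrative; `model` is any generation-capable
# transformers model):
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
#   outputs = model.generate(input_ids, stopping_criteria=criteria)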
| 717
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 348
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640,
                 num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1,
                 initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Dict = None
_lowerCamelCase : Union[str, Any] = None
if self.use_labels:
_lowerCamelCase : Dict = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowercase ( self: str ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = MobileViTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.num_labels
_lowerCamelCase : str = MobileViTForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[str] = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : Tuple = MobileViTForSemanticSegmentation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
_lowerCamelCase : Tuple = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = MobileViTModelTester(self )
_lowerCamelCase : Optional[int] = MobileViTConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def _lowercase ( self: List[str] ):
'''simple docstring'''
pass
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : List[str] = [*signature.parameters.keys()]
_lowerCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: Tuple ):
'''simple docstring'''
def check_hidden_states_output(__lowerCAmelCase: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: int ):
_lowerCamelCase : str = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : Dict = outputs.hidden_states
_lowerCamelCase : Optional[Any] = 5
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowerCamelCase : int = 2
for i in range(len(__lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
divisor *= 2
self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = True
check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Any = True
check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Union[str, Any] = MobileViTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
@cached_property
def _lowercase ( self: Any ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Tuple = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = prepare_img()
_lowerCamelCase : List[Any] = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase )
# verify the logits
_lowerCamelCase : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Any = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) )
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
_lowerCamelCase : Any = model.to(__lowerCAmelCase )
_lowerCamelCase : Tuple = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
_lowerCamelCase : Any = prepare_img()
_lowerCamelCase : int = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : List[str] = model(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = outputs.logits
# verify the logits
_lowerCamelCase : List[str] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Dict = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
] ,device=__lowerCAmelCase ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,__lowerCAmelCase ,atol=1e-4 ) )
@slow
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : str = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
_lowerCamelCase : Optional[Any] = model.to(__lowerCAmelCase )
_lowerCamelCase : Dict = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Any = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = outputs.logits.detach().cpu()
_lowerCamelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase ,target_sizes=[(50, 60)] )
_lowerCamelCase : Optional[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape ,__lowerCAmelCase )
_lowerCamelCase : Any = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase )
_lowerCamelCase : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape ,__lowerCAmelCase )
| 46
|
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
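# A minimal sketch of how this script might be invoked from the command line.
# The flag names follow the dataclass fields above; the script filename and the
# checkpoint identifiers are illustrative assumptions, not values from this file:
#
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2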
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract MFSC features for one (unbatched) waveform vector."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )

        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset dictionary into features and targets
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    data = load_iris()
    features, targets = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = data["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
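    # Hand-check of the demo above: these densities sum to exactly one, so the
    # curvature term cancels, E(z=0) = 1, and the call prints the Hubble
    # constant itself, 68.3.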
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
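# Hand-checked example of the three-way partition above:
# dutch_national_flag_sort([2, 0, 1, 2, 0]) returns [0, 0, 1, 2, 2]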
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our ViLT structure.
    """
    # define configuration and initialize the HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
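# Example invocation (the default --checkpoint_url above points at the MLM+ITM
# checkpoint; the script filename and dump folder are illustrative assumptions):
#
#   python convert_vilt_original_to_pytorch.py --pytorch_dump_folder_path ./vilt-converted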
"""simple docstring"""
def rank_of_matrix(matrix: list) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
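# Hand-checked examples of the elimination above:
# rank_of_matrix([[1, 0], [0, 1]]) returns 2 (full rank)
# rank_of_matrix([[1, 2], [2, 4]]) returns 1 (second row is a multiple of the first)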
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
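# A square lamina with outer side `outer_width` and a square hole of side
# `hole_width` uses outer_width**2 - hole_width**2 tiles, so `count[t]` ends up
# holding the number of distinct laminae buildable from exactly t tiles; the
# return value counts tile budgets with between 1 and `n_limit` arrangements.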
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder for image captioning: a learned prefix is projected into GPT-2's embedding space and captions are
    decoded with beam search.
    """

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--pretrained_model_name_or_path", type=str, default=None, required=True,
                        help="Path to pretrained model or model identifier from huggingface.co/models.")
    parser.add_argument("-c", "--caption", type=str, default="robotic cat with wings",
                        help="Text used to generate images.")
    parser.add_argument("-n", "--images_num", type=int, default=4, help="How much images to generate.")
    parser.add_argument("-s", "--seed", type=int, default=42, help="Seed for random process.")
    parser.add_argument("-ci", "--cuda_id", type=int, default=0, help="cuda_id.")
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
    pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
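# A minimal sketch of how this evaluation script might be invoked (the script
# filename and output directory are illustrative assumptions):
#
#   python evaluate_generation.py -m ./sd-output -c "robotic cat with wings" -n 4 -s 42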
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance, replacing nested `GenerationConfig` objects with plain dictionaries so the result
        can be saved as JSON.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
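# A minimal usage sketch (the field names follow the definitions above;
# `output_dir` comes from the base TrainingArguments):
#
#   args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, generation_num_beams=4)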
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """Reverse the order of the words in a sentence."""
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
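    # Each iteration step replaces every segment with four, so five steps turn
    # the initial 3 segments into 3 * 4**5 = 3072 segments (3073 points).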
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
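# Illustrative usage sketch (hypothetical, requires the sentencepiece/tokenizers deps):
# tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
# tokenizer.build_inputs_with_special_tokens([5, 6])  # -> [cls_id, 5, 6, sep_id]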
| 369
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100
|
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Modular multiplicative inverse of ``a`` modulo ``m`` (extended Euclid)."""
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
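# Illustrative sanity check (added for clarity, not part of the original file):
# 3 * 9 == 27 == 1 (mod 26), so the inverse of 3 modulo 26 is 9.
if __name__ == "__main__":
    assert find_mod_inverse(3, 26) == 9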
| 100
| 1
|
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
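# Illustrative usage sketch (assumes an instantiated EnCodec feature extractor and
# a T5 tokenizer; keyword names follow the reconstructed __call__ above):
# processor = MusicgenProcessor(feature_extractor, tokenizer)
# inputs = processor(text=["80s pop with bassy drums"], audio=waveform, sampling_rate=32000)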
| 404
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.emoji_file, 'w') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = 'こんにちは、世界。 こんばんは、㔺界。'
        expected_tokens = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        expected_text = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        expected_text = 'こんにちは、世界。こんばんは、世界。😀'
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode('', prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer('', prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        x_token_1 = tokenizer.encode('あンいワ')
        x_token_2 = tokenizer.encode('', prefix_text='あンいワ')
        x_token_3 = tokenizer.encode('いワ', prefix_text='あン')
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        input_pairs = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 404
| 1
|
"""simple docstring"""
def solution(n: int = 2_000_000) -> int:
    """Sum all primes below ``n`` using a simple sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 700
|
"""simple docstring"""
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
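# For the driver input above there are C(5, 3) = 10 combinations; the first
# few lines printed are "10 20 30", "10 20 40" and "10 20 50".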
| 397
| 0
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 63
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12,
        num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5,
        attention_dropout=0.0, initializer_range=0.02, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101,
        eos_token_id=102, num_image_with_embedding=None, **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 63
| 1
|
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'X': datasets.Sequence(datasets.Value('float', id='sequence'), id='X'),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError('Expected `X` to be a 2D vector')
        if len(reference_distribution.shape) != 2:
            raise ValueError('Expected `reference_distribution` to be a 2D vector')
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension')

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 710
|
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
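# Illustrative example (the classic "healthy/fever" HMM often used for Viterbi):
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     start_p = {"Healthy": 0.6, "Fever": 0.4}
#     trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#                "Fever": {"Healthy": 0.4, "Fever": 0.6}}
#     emit_p = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#               "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#     viterbi(observations, states, start_p, trans_p, emit_p)
#     -> ["Healthy", "Healthy", "Fever"]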
def _validation(observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities):
    _validate_not_empty(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities):
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space, states_space):
    _validate_list(observations_space, 'observations_space')
    _validate_list(states_space, 'states_space')


def _validate_list(_object, var_name):
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities):
    _validate_dict(initial_probabilities, 'initial_probabilities', float)
    _validate_nested_dict(transition_probabilities, 'transition_probabilities')
    _validate_nested_dict(emission_probabilities, 'emission_probabilities')


def _validate_nested_dict(_object, var_name):
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object, var_name, value_type, nested=False):
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = 'nested dictionary ' if nested else ''
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
| 618
| 0
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
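# The driver above prints every subsequence of each input, i.e. 2 ** len(seq)
# lines per call (16 for [3, 1, 2, 4], 8 for ['A', 'B', 'C']).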
| 484
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self, num_channels=3, image_size=600, width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean",
        initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99, dropout_rate=0.5,
        drop_connect_rate=0.2, **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
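# Quick sanity sketch (illustrative): with the defaults above,
# EfficientNetConfig().num_hidden_layers == sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64.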
| 381
| 0
|
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Original GELU: x * Phi(x), computed with the Gauss error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """Clip the range of possible GELU outputs to [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two halves and gate one with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    'gelu': gelu,
    'gelu_10': gelu_10,
    'gelu_fast': gelu_fast,
    'gelu_new': gelu_new,
    'glu': glu,
    'mish': mish,
    'quick_gelu': quick_gelu,
    'relu': tf.keras.activations.relu,
    'sigmoid': tf.keras.activations.sigmoid,
    'silu': tf.keras.activations.swish,
    'swish': tf.keras.activations.swish,
    'tanh': tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
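# Illustrative usage (using the reconstructed names above):
# act = get_tf_activation("gelu_new")
# y = act(tf.constant([-1.0, 0.0, 1.0]))  # tanh-approximated GELU values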
| 703
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
            '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er',
            '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained('allenai/led-base-16384')

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors='pt')
            self.assertIn('input_ids', batch)
            self.assertIn('attention_mask', batch)
            self.assertNotIn('labels', batch)
            self.assertNotIn('decoder_attention_mask', batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding='max_length', return_tensors='pt')
            self.assertEqual(32, targets['input_ids'].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 1024, 'I am a small frog'], padding=True, truncation=True, return_tensors='pt'
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors='pt')
            targets = tokenizer(text_target=tgt_text, return_tensors='pt')
            input_ids = inputs['input_ids']
            labels = targets['input_ids']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ['Summary of the text.', 'Another summary.']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output['global_attention_mask'] = [[0] * len(x) for x in encoded_output['input_ids']]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs['global_attention_mask'], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
| 191
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
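# --- Illustration (not part of the original module) ---
# A minimal sketch of what the lazy-import pattern above buys, assuming the
# package is laid out like `transformers.models.bert`: importing the package
# is cheap, and each submodule is only imported on first attribute access.
#
#   from transformers.models import bert
#   config = bert.BertConfig()          # triggers the import of configuration_bert
#   tok_cls = bert.BertTokenizerFast    # triggers tokenization_bert_fast (if tokenizers is installed)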
| 560
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.")
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
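# --- Illustration (not part of the original file) ---
# A minimal sketch of using the config above; `DPTConfig` is the deobfuscated
# class name assumed in this rewrite.
#
#   config = DPTConfig(is_hybrid=True)   # builds a default BiT backbone config
#   assert config.backbone_config.layer_type == "bottleneck"
#   plain = DPTConfig()                  # pure-ViT variant
#   assert plain.backbone_config is None and plain.neck_ignore_stages == []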
| 560
| 1
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        """simple docstring"""
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """simple docstring"""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        """simple docstring"""
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        """simple docstring"""
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """simple docstring"""
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)}).")

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """simple docstring"""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
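# --- Illustration (not part of the original file) ---
# A minimal sketch of the encoding behaviour, using the deobfuscated names above:
#
#   feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   # -> {"language": ("en", "fr", "fr"),
#   #     "translation": ("the cat", "le chat", "la chatte")}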
| 373
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the HuggingFace CLIP processor so gradients can flow through image
    preprocessing: text goes through the original tokenizer, but images stay
    torch tensors instead of being converted to PIL.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector to the latent representation and decode it."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
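# --- Illustration (not part of the original script) ---
# A minimal sketch of driving the editor above, using the deobfuscated names
# (VQGAN_CLIP, generate); the checkpoint and image paths are placeholders only.
#
#   editor = VQGAN_CLIP(vqgan_config="configs/model.yaml", vqgan_checkpoint="ckpts/last.ckpt")
#   editor.generate(
#       pos_prompts="a smiling face:1.0 | blue eyes:0.5",  # "prompt:weight" pairs parsed by process_prompts
#       neg_prompts="blurry",
#       image_path="face.png",
#       save_intermediate=True,
#   )
#   editor.make_animation(input_path=editor.save_path, output_path="edit.gif")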
| 373
| 1
|
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    '''simple docstring'''

    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=['ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'],
            mid_block_type='UNetMidBlock2DSimpleCrossAttn',
            up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type='text',
            addition_embed_type_num_heads=2,
            cross_attention_norm='group_norm',
            resnet_time_scale_shift='scale_shift',
            act_fn='gelu',
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule='squaredcos_cap_v2',
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type='epsilon',
            variance_type='learned_range',
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=['ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'],
            mid_block_type='UNetMidBlock2DSimpleCrossAttn',
            up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type='text',
            addition_embed_type_num_heads=2,
            cross_attention_norm='group_norm',
            resnet_time_scale_shift='scale_shift',
            act_fn='gelu',
            class_embed_type='timestep',
            mid_block_scale_factor=1.414,
            time_embedding_act_fn='gelu',
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule='squaredcos_cap_v2',
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type='epsilon',
            variance_type='learned_range',
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule='squaredcos_cap_v2',
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs['prompt']
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']

        if "image" in inputs:
            image = inputs['image']
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs['mask_image']
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs['original_image']
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }

        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f'`{optional_component}` did not stay set to None after loading.', )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']

        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
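# --- Illustration (not part of the original test file) ---
# A sketch of how a concrete test class would consume this mixin, using the
# deobfuscated names above; `IFPipelineFastTests` and `IFPipeline` stand in
# for the real diffusers test/pipeline classes.
#
#   class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
#       pipeline_class = IFPipeline
#
#       def get_dummy_components(self):
#           return self._get_dummy_components()
#
#       def test_save_load_local(self):
#           self._test_save_load_local()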
| 247
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
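if __name__ == "__main__":
    # Illustration (not part of the original module): build a 1 kHz low-pass
    # for 48 kHz audio and run a few samples through it. This assumes the
    # companion IIRFilter class exposes a per-sample `process` method.
    filt = make_lowpass(1_000, 48_000)
    print([filt.process(sample) for sample in (0.0, 1.0, 0.5, -0.5)])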
| 227
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : Optional[Any] = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''lxmert'''
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_attention_heads=12,
        num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        l_layers=9, x_layers=5, r_layers=5,
        visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67,
        task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True,
        visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
        super().__init__(**kwargs)
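# --- Illustration (not part of the original module) ---
# The per-modality layer counts end up in a single dict, assuming the
# deobfuscated class name LxmertConfig used above:
#
#   config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
#   config.num_hidden_layers  # {"vision": 5, "cross_encoder": 5, "language": 9}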
| 715
|
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    '''simple docstring'''
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    '''simple docstring'''
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    '''simple docstring'''
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70_000):
    '''simple docstring'''
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'loss: {j} \t')  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print('theta: ', theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta))  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
    plt.legend()
    plt.show()
| 496
| 0
|
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        ''.join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 105
|
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    """simple docstring"""

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight")
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias")

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}", )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight", )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias", )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}")
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight", )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias", )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
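# --- Illustration (not part of the original module) ---
# A sketch of exercising the adaptive softmax above, assuming the deobfuscated
# name TFAdaptiveSoftmaxMask; the (seq_len, batch, dim) layout follows the
# einsum strings in _logit.
#
#   softmax = TFAdaptiveSoftmaxMask(vocab_size=1_000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=2)
#   hidden = tf.random.normal((16, 2, 64))                           # (seq_len, batch, d_proj)
#   target = tf.random.uniform((16, 2), maxval=1_000, dtype=tf.int64)
#   logprobs = softmax(hidden, target)   # also registers the NLL loss via add_loss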
| 401
| 0
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
    def _download_and_prepare(self, dl_manager):
import nltk
nltk.download("""wordnet""" )
if NLTK_VERSION >= version.Version("""3.6.5""" ):
nltk.download("""punkt""" )
if NLTK_VERSION >= version.Version("""3.6.6""" ):
nltk.download("""omw-1.4""" )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("""3.6.5"""):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 72
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            """Could not locate the feature extractor configuration file, will try to use the model config instead.""")
        return {}

    with open(resolved_config_file, encoding="""utf-8""") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            """AutoFeatureExtractor is designed to be instantiated """
            """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""")

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("""config""", None)
        trust_remote_code = kwargs.pop("""trust_remote_code""", None)
        kwargs["""_from_auto"""] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("""feature_extractor_type""", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("""auto_map""", {}):
            feature_extractor_auto_map = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, """feature_extractor_type""", None)
            if hasattr(config, """auto_map""") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["""AutoFeatureExtractor"""]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("""code_revision""", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""")

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
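# --- Illustration (not part of the original module) ---
# The public entry point above in use; the checkpoint name and `raw_audio`
# array are placeholders only.
#
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = feature_extractor(raw_audio, sampling_rate=16_000, return_tensors="pt")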
| 72
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = "gpt_neo"
UpperCAmelCase__ = ["past_key_values"]
UpperCAmelCase__ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : Union[str, Any] , __snake_case : Tuple=5_0_2_5_7 , __snake_case : Optional[int]=2_0_4_8 , __snake_case : List[str]=2_0_4_8 , __snake_case : List[Any]=2_4 , __snake_case : List[str]=[[["global", "local"], 1_2]] , __snake_case : Union[str, Any]=1_6 , __snake_case : str=None , __snake_case : List[Any]=2_5_6 , __snake_case : Any="gelu_new" , __snake_case : Any=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : List[Any]=0.1 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]=0.02 , __snake_case : Union[str, Any]=True , __snake_case : Any=5_0_2_5_6 , __snake_case : str=5_0_2_5_6 , **__snake_case : Any , ) -> Union[str, Any]:
__magic_name__: Union[str, Any] = vocab_size
__magic_name__: List[str] = max_position_embeddings
__magic_name__: List[str] = hidden_size
__magic_name__: List[Any] = num_layers
__magic_name__: Dict = num_heads
__magic_name__: int = intermediate_size
__magic_name__: Tuple = window_size
__magic_name__: List[str] = activation_function
__magic_name__: List[str] = resid_dropout
__magic_name__: List[Any] = embed_dropout
__magic_name__: Any = attention_dropout
__magic_name__: int = classifier_dropout
__magic_name__: Any = layer_norm_epsilon
__magic_name__: Tuple = initializer_range
__magic_name__: Any = use_cache
__magic_name__: Any = bos_token_id
__magic_name__: int = eos_token_id
__magic_name__: Optional[Any] = attention_types
__magic_name__: List[Any] = self.expand_attention_types_params(__snake_case )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
F'`config.num_layers = {self.num_layers}`. '
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@staticmethod
def lowerCamelCase__ ( __snake_case : Optional[Any] ) -> Any:
__magic_name__: List[Any] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def a ( __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple ) -> Dict:
import torch
__magic_name__: List[str] = input.size()
__magic_name__: Dict = len(__UpperCAmelCase )
__magic_name__: Tuple = shape[dimension]
__magic_name__: Tuple = torch.arange(0 , __UpperCAmelCase , __UpperCAmelCase )
__magic_name__: Optional[int] = torch.div(sizedim - size , __UpperCAmelCase , rounding_mode="""floor""" ) + 1
__magic_name__: Optional[int] = torch.arange(__UpperCAmelCase ) + low_indices[:min_length][:, None]
__magic_name__: Optional[int] = [slice(__UpperCAmelCase )] * rank
__magic_name__: str = indices
__magic_name__: Optional[Any] = input[s]
__magic_name__: List[str] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__UpperCAmelCase )
def a ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] ) -> List[str]:
import torch
__magic_name__: Union[str, Any] = torch.arange(1 , __UpperCAmelCase )
__magic_name__: str = torch.remainder(__UpperCAmelCase , __UpperCAmelCase )
__magic_name__: List[str] = remainders == 0
__magic_name__: Any = candidates[divisor_indices]
__magic_name__: str = torch.max(__UpperCAmelCase )
return largest_divisor, torch.div(__UpperCAmelCase , __UpperCAmelCase , rounding_mode="""floor""" )
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
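

# Hypothetical usage sketch (not part of the original file): obtain export-ready dummy
# inputs, ordered exactly as GPT-Neo's forward() expects them. `config` and `tokenizer`
# are assumed to be a GPTNeoConfig and a matching tokenizer supplied by the caller.
def _example_dummy_inputs(config, tokenizer):
    onnx_config = GPTNeoOnnxConfig(config)
    return onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)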
| 96
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    r"""
    Constructs an InstructBLIP processor which wraps a BLIP image processor, an LLM tokenizer
    and a Q-Former tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        # forwards all arguments to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwards all arguments to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
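

# Hypothetical usage sketch (not part of the original file): one call tokenizes the
# prompt for both the language model and the Q-Former, and preprocesses the image.
def _example_processor_call(processor: "InstructBlipProcessor", image):
    inputs = processor(images=image, text="What is shown in this picture?", return_tensors="pt")
    # inputs holds pixel_values, input_ids/attention_mask and the qformer_* counterparts
    return inputs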
| 586
| 0
|
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Map PyTorch BERT weight names onto the original TF checkpoint layout and save the result.
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
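

# Hypothetical invocation (script name and paths are placeholders, not from the original file):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt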
| 47
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs) -> None:
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
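

# Hypothetical usage sketch (not part of the original file; assumes network access to
# the checkpoint referenced in the map above):
def _example_roundtrip() -> str:
    tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
    ids = tokenizer("Hello world").input_ids
    return tokenizer.decode(ids)  # "Hello world"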
| 47
| 1
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])

INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """
    Go through the number of iterations determined by the argument "steps".
    """
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """
    Loop through each pair of adjacent vectors and replace the connecting line
    segment with the 4 segments of the Koch curve.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """
    Standard 2D rotation using the rotation matrix ((cos, -sin), (sin, cos)).
    """
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
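

# A small worked check (not part of the original file): rotating the unit x-vector by
# 90 degrees yields the unit y-vector, up to floating point error.
def _example_rotate() -> None:
    assert numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))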
def plot(vectors: list[numpy.ndarray]) -> None:
    """
    Utility function to plot the vectors using matplotlib.pyplot.
    """
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 70
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
| 70
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =LDMTextToImagePipeline
UpperCAmelCase_ =TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
UpperCAmelCase_ =PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
UpperCAmelCase_ =TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase_ =False
def _UpperCamelCase ( self ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE_ = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vqvae''': vae,
'''bert''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def _UpperCamelCase ( self , _A , _A=0 ) -> int:
if str(_A ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self , _A , _A=torch.floataa , _A=0 ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = torch.manual_seed(_A )
SCREAMING_SNAKE_CASE_ = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(_A ).to(device=_A , dtype=_A )
SCREAMING_SNAKE_CASE_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ = self.get_inputs(_A )
SCREAMING_SNAKE_CASE_ = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] )
SCREAMING_SNAKE_CASE_ = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self , _A , _A=torch.floataa , _A=0 ) -> Any:
SCREAMING_SNAKE_CASE_ = torch.manual_seed(_A )
SCREAMING_SNAKE_CASE_ = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(_A ).to(device=_A , dtype=_A )
SCREAMING_SNAKE_CASE_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ = self.get_inputs(_A )
SCREAMING_SNAKE_CASE_ = pipe(**_A ).images[0]
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' )
SCREAMING_SNAKE_CASE_ = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 703
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A__ ( __lowerCamelCase = "laptop" ):
SCREAMING_SNAKE_CASE_ = F'''https://www.amazon.in/laptop/s?k={product}'''
SCREAMING_SNAKE_CASE_ = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
SCREAMING_SNAKE_CASE_ = BeautifulSoup(requests.get(__lowerCamelCase, headers=__lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
SCREAMING_SNAKE_CASE_ = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''', attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''}, ), soup.find_all('''div''', attrs={'''class''': '''a-row a-size-base a-color-base'''} ), ):
try:
SCREAMING_SNAKE_CASE_ = item.ha.text
SCREAMING_SNAKE_CASE_ = '''https://www.amazon.in/''' + item.ha.a['''href''']
SCREAMING_SNAKE_CASE_ = item.find('''span''', attrs={'''class''': '''a-offscreen'''} ).text
try:
SCREAMING_SNAKE_CASE_ = item.find('''span''', attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
SCREAMING_SNAKE_CASE_ = '''Not available'''
try:
SCREAMING_SNAKE_CASE_ = (
'''₹'''
+ item.find(
'''span''', attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
SCREAMING_SNAKE_CASE_ = ''''''
try:
SCREAMING_SNAKE_CASE_ = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''', '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''', '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''', '''''' ) )
)
* 1_00 )
except ValueError:
SCREAMING_SNAKE_CASE_ = float('''nan''' )
except AttributeError:
pass
SCREAMING_SNAKE_CASE_ = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
SCREAMING_SNAKE_CASE_ = ''' '''
SCREAMING_SNAKE_CASE_ = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__UpperCAmelCase = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 597
| 0
|
def different_signs(num1: int, num2: int) -> bool:
    # XOR of two ints is negative iff their sign bits differ.
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
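

# A few worked examples (not part of the original file) exercising the sign-bit trick:
def _example_different_signs() -> None:
    assert different_signs(1, -1)
    assert not different_signs(1, 1)
    assert not different_signs(-1, -1)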
| 21
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """
    An implementation of the Monte Carlo method used to find pi:
    sample points uniformly in a 2x2 square and count the fraction inside the unit circle.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
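

# Hypothetical demo (not part of the original file): the estimator's error shrinks
# roughly like 1/sqrt(n), so 100k samples usually give pi to about two decimal places.
def _example_estimate_pi() -> None:
    pi_estimator(100_000)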
| 21
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class A__ ( _lowerCamelCase):
A_ : Union[PIL.Image.Image, np.ndarray]
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
super().__init__()
self.register_modules(
prior=_SCREAMING_SNAKE_CASE , image_encoder=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , renderer=_SCREAMING_SNAKE_CASE , )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCAmelCase : Tuple = torch.device(f"cuda:{gpu_id}" )
__lowerCAmelCase : Union[str, Any] = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@property
def __lowerCamelCase ( self ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(image[0] , torch.Tensor ):
__lowerCAmelCase : str = torch.cat(_SCREAMING_SNAKE_CASE , axis=0 ) if image[0].ndim == 4 else torch.stack(_SCREAMING_SNAKE_CASE , axis=0 )
if not isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
__lowerCAmelCase : Optional[int] = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
__lowerCAmelCase : Dict = image.to(dtype=self.image_encoder.dtype , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = self.image_encoder(_SCREAMING_SNAKE_CASE )['last_hidden_state']
__lowerCAmelCase : Optional[int] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
__lowerCAmelCase : Tuple = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : List[Any] = torch.zeros_like(_SCREAMING_SNAKE_CASE )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 25 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 64 , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ):
if isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
__lowerCAmelCase : Union[str, Any] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
__lowerCAmelCase : Tuple = image.shape[0]
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
__lowerCAmelCase : Any = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_SCREAMING_SNAKE_CASE )}" )
__lowerCAmelCase : Optional[Any] = self._execution_device
__lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
__lowerCAmelCase : Any = guidance_scale > 1.0
__lowerCAmelCase : List[Any] = self._encode_image(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# prior
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = self.scheduler.timesteps
__lowerCAmelCase : Optional[int] = self.prior.config.num_embeddings
__lowerCAmelCase : List[str] = self.prior.config.embedding_dim
__lowerCAmelCase : Any = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
__lowerCAmelCase : str = latents.reshape(latents.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase : Optional[int] = self.scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.prior(
_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , proj_embedding=_SCREAMING_SNAKE_CASE , ).predicted_image_embedding
# remove the variance
__lowerCAmelCase : Tuple = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
__lowerCAmelCase : Dict = self.scheduler.step(
_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , sample=_SCREAMING_SNAKE_CASE , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_SCREAMING_SNAKE_CASE )
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
__lowerCAmelCase : int = images.cpu().numpy()
if output_type == "pil":
__lowerCAmelCase : Dict = [self.numpy_to_pil(_SCREAMING_SNAKE_CASE ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_SCREAMING_SNAKE_CASE )
| 712
|
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace relation: c = sqrt(K / rho), with bulk modulus K in Pa
    and density rho in kg/m^3.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
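

# A hedged worked example (values are rough literature numbers, not from the original
# file): water at ~998 kg/m^3 with bulk modulus ~2.15e9 Pa gives roughly 1468 m/s.
def _example_speed_in_water() -> float:
    return speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)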
if __name__ == "__main__":
import doctest
doctest.testmod()
| 549
| 0
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
def __A ( self ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A_ )
def __A ( self ):
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def __A ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A_ )
def __A ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Any = scheduler_class(**A_ )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_model()
SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ : str = sample.to(A_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.scale_model_input(A_ , A_ )
SCREAMING_SNAKE_CASE_ : List[str] = model(A_ , A_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step(A_ , A_ , A_ , generator=A_ )
SCREAMING_SNAKE_CASE_ : Any = output.prev_sample
SCREAMING_SNAKE_CASE_ : Tuple = torch.sum(torch.abs(A_ ) )
SCREAMING_SNAKE_CASE_ : str = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 10.08_07 ) < 1e-2
assert abs(result_mean.item() - 0.01_31 ) < 1e-3
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : str = self.get_scheduler_config(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE_ : int = scheduler_class(**A_ )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = self.dummy_model()
SCREAMING_SNAKE_CASE_ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ : int = sample.to(A_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_ : Tuple = scheduler.scale_model_input(A_ , A_ )
SCREAMING_SNAKE_CASE_ : Dict = model(A_ , A_ )
SCREAMING_SNAKE_CASE_ : Tuple = scheduler.step(A_ , A_ , A_ , generator=A_ )
SCREAMING_SNAKE_CASE_ : int = output.prev_sample
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.sum(torch.abs(A_ ) )
SCREAMING_SNAKE_CASE_ : int = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 0.00_02 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**A_ )
scheduler.set_timesteps(self.num_inference_steps , device=A_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_model()
SCREAMING_SNAKE_CASE_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sample.to(A_ )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ : Tuple = scheduler.scale_model_input(A_ , A_ )
SCREAMING_SNAKE_CASE_ : List[Any] = model(A_ , A_ )
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step(A_ , A_ , A_ , generator=A_ )
SCREAMING_SNAKE_CASE_ : Dict = output.prev_sample
SCREAMING_SNAKE_CASE_ : Tuple = torch.sum(torch.abs(A_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 10.08_07 ) < 1e-2
assert abs(result_mean.item() - 0.01_31 ) < 1e-3
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**A_ , use_karras_sigmas=A_ )
scheduler.set_timesteps(self.num_inference_steps , device=A_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = self.dummy_model()
SCREAMING_SNAKE_CASE_ : int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE_ : Dict = sample.to(A_ )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler.scale_model_input(A_ , A_ )
SCREAMING_SNAKE_CASE_ : int = model(A_ , A_ )
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step(A_ , A_ , A_ , generator=A_ )
SCREAMING_SNAKE_CASE_ : List[Any] = output.prev_sample
SCREAMING_SNAKE_CASE_ : str = torch.sum(torch.abs(A_ ) )
SCREAMING_SNAKE_CASE_ : int = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1e-2
assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1e-3
| 345
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
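

# Hypothetical invocation (not part of the original conftest): the shared option
# registered above enables per-run report files, e.g.
#   pytest tests/ --make-reports=run_1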
| 605
| 0
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
_UpperCamelCase : Tuple = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class _snake_case ( unittest.TestCase ):
@classmethod
def _SCREAMING_SNAKE_CASE ( cls ):
'''simple docstring'''
lowerCAmelCase = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
lowerCAmelCase = BertConfig.from_pretrained(F'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='test-config' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
lowerCAmelCase = BertConfig.from_pretrained(F'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
lowerCAmelCase = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id='valid_org/test-config-org' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
lowerCAmelCase = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCAmelCase = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCAmelCase = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config' , trust_remote_code=_SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class _snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCAmelCase = c.n_embd + 1 # int
lowerCAmelCase = c.resid_pdrop + 1.0 # float
lowerCAmelCase = not c.scale_attn_weights # bool
lowerCAmelCase = c.summary_type + 'foo' # str
c.update_from_string(
F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(_SCREAMING_SNAKE_CASE , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(_SCREAMING_SNAKE_CASE , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(_SCREAMING_SNAKE_CASE , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(_SCREAMING_SNAKE_CASE , c.summary_type , 'mismatch for key: summary_type' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = PretrainedConfig()
lowerCAmelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_SCREAMING_SNAKE_CASE , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCAmelCase = [key for key, value in config_common_kwargs.items() if value == getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F' {", ".join(_SCREAMING_SNAKE_CASE )}.' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCAmelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCAmelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = mock.Mock()
lowerCAmelCase = 5_00
lowerCAmelCase = {}
lowerCAmelCase = HTTPError
lowerCAmelCase = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=_SCREAMING_SNAKE_CASE ) as mock_head:
lowerCAmelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = AutoConfig.from_pretrained('bert-base-cased' )
lowerCAmelCase = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(_SCREAMING_SNAKE_CASE , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCAmelCase = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCAmelCase = ['config.42.0.0.json']
lowerCAmelCase = 7_68
configuration.save_pretrained(_SCREAMING_SNAKE_CASE )
shutil.move(os.path.join(_SCREAMING_SNAKE_CASE , 'config.4.0.0.json' ) , os.path.join(_SCREAMING_SNAKE_CASE , 'config.42.0.0.json' ) )
lowerCAmelCase = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCAmelCase = 'v4.0.0'
lowerCAmelCase , lowerCAmelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
_SCREAMING_SNAKE_CASE , return_unused_kwargs=_SCREAMING_SNAKE_CASE )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_SCREAMING_SNAKE_CASE , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCAmelCase = 'v3.0.0'
lowerCAmelCase = old_transformers.models.auto.AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 707
|
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( a_ ):
SCREAMING_SNAKE_CASE : List[Any] = (IPNDMScheduler,)
SCREAMING_SNAKE_CASE : Optional[Any] = (('''num_inference_steps''', 50),)
    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[:]
if time_step is None:
lowerCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[:]
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[:]
if time_step is None:
lowerCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[:]
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
return sample
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(_SCREAMING_SNAKE_CASE , 'set_timesteps' ):
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(_SCREAMING_SNAKE_CASE , 'set_timesteps' ):
lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCAmelCase = dummy_past_residuals[:]
lowerCAmelCase = scheduler.timesteps[5]
lowerCAmelCase = scheduler.timesteps[6]
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE , time_step=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=_SCREAMING_SNAKE_CASE , time_step=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 514
| 0
|
"""simple docstring"""
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data source lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the range of shard indices per job, as evenly as possible."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
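# A minimal worked example (editorial sketch, not part of the original module):
# distributing 10 shards over 3 jobs yields contiguous, balanced ranges, with
# the remainder spread over the first groups:
#
#   _distribute_shards(num_shards=10, max_num_jobs=3)
#   # -> [range(0, 4), range(4, 7), range(7, 10)]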
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into at most max_num_jobs gen_kwargs, sharding only the lists."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Inverse of _split_gen_kwargs: concatenate the sharded lists back together."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs, shuffling only the lists."""
    # Lists of the same size get the same permutation, so that entangled lists
    # (e.g. data files and their metadata) stay aligned after shuffling.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
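# Usage sketch (illustrative values, not from the original module):
#
#   rng = np.random.default_rng(42)
#   _shuffle_gen_kwargs(rng, {"files": ["a", "b", "c"], "ids": [0, 1, 2], "split": "train"})
#   # both 3-element lists receive the same permutation, so files and ids stay
#   # aligned, while the non-list value "train" is left untouched.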
| 177
|
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
_DESCRIPTION = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference (or a set of references), i.e. a human-produced summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
_KWARGS_DESCRIPTION = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
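# Note (editorial sketch): with use_aggregator=True the per-example scores are
# bootstrap-resampled into AggregateScore(low, mid, high) tuples, as in the
# docstring example above; with use_aggregator=False the result instead maps
# each rouge type to the list of per-example Score tuples.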
| 177
| 1
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    '''tensor(bool)''': np.bool_,
    '''tensor(int8)''': np.int8,
    '''tensor(uint8)''': np.uint8,
    '''tensor(int16)''': np.int16,
    '''tensor(uint16)''': np.uint16,
    '''tensor(int32)''': np.int32,
    '''tensor(uint32)''': np.uint32,
    '''tensor(int64)''': np.int64,
    '''tensor(uint64)''': np.uint64,
    '''tensor(float16)''': np.float16,
    '''tensor(float)''': np.float32,
    '''tensor(double)''': np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
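# Hedged usage sketch (the repo id below is illustrative, not a real checkpoint):
#
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-unet", provider="CPUExecutionProvider")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))
#   # __call__ converts every keyword argument to np.array and feeds it to the
#   # ONNX Runtime session, returning the raw list of output tensors.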
| 366
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; reset it so CI does not fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
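# Usage sketch for the IGNORE_RESULT flag registered above (the call in the
# example is hypothetical): appending the directive to a doctest line makes
# its output comparison always pass, which is handy for nondeterministic output.
#
#   >>> some_nondeterministic_call()  # doctest: +IGNORE_RESULT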
| 366
| 1
|
'''simple docstring'''
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # cut the link between the halves (the check below works even without this)
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
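# Minimal sketch of the node type the three checks above assume (hypothetical
# helper, not part of the original snippet):
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

# 1 -> 2 -> 2 -> 1 is a palindrome; note that is_palindrome() rearranges the
# list in place, so rebuild the list before trying another variant:
#   head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
#   is_palindrome_stack(head)  # -> True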
| 107
|
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(f"{solution() = }")
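# Worked example (editorial sketch): "XIIII" parses to 14 and regenerates as
# the minimal form "XIV", saving 5 - 3 = 2 characters per occurrence:
#   parse_roman_numerals("XIIII")  # -> 14
#   generate_roman_numerals(14)    # -> "XIV"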
| 649
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
'''simple docstring'''
lowercase : Optional[Any] = VOCAB_FILES_NAMES
lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Tuple = ["input_ids", "attention_mask"]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<s>" , SCREAMING_SNAKE_CASE__ : int="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : List[Any]="<mask>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : int , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
A : int =AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
A : Any ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
A : Tuple =vocab_file
A : Any =monolingual_vocab_file
A : Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
A : Dict ={}
A : Optional[int] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowercase_ ) not in self.fairseq_tokens_to_ids:
A : List[Any] =cnt
cnt += 1
with open(lowercase_ , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
A : int =line.strip().split()[0]
A : Dict =len(self.fairseq_tokens_to_ids )
if str(lowercase_ ) not in self.fairseq_tokens_to_ids:
A : Optional[Any] =len(self.fairseq_tokens_to_ids )
A : Tuple ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : str ) -> Optional[Any]:
A : List[str] =self.__dict__.copy()
A : Optional[Any] =None
A : str =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
A : int =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
A : int ={}
A : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
return len(self.fairseq_ids_to_tokens )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[Any]:
A : Dict ={self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ) -> Any:
return self.fairseq_ids_to_tokens[index]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any:
A : Union[str, Any] ="""""".join(lowercase_ ).replace(lowercase_ , ' ' ).strip()
return out_string
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Optional[int]:
if not os.path.isdir(lowercase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A : Optional[int] =os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A : str =os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , 'wb' ) as fi:
A : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowercase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowercase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowercase_ , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(lowercase_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
| 708
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Dict ={
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
'''simple docstring'''
lowercase : Optional[int] = "xglm"
lowercase : Any = ["past_key_values"]
lowercase : Dict = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=25_60_08 , SCREAMING_SNAKE_CASE__ : Dict=20_48 , SCREAMING_SNAKE_CASE__ : List[Any]=10_24 , SCREAMING_SNAKE_CASE__ : str=40_96 , SCREAMING_SNAKE_CASE__ : Optional[int]=24 , SCREAMING_SNAKE_CASE__ : Optional[Any]=16 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> int:
A : str =vocab_size
A : Union[str, Any] =max_position_embeddings
A : Optional[Any] =d_model
A : Optional[int] =ffn_dim
A : int =num_layers
A : Any =attention_heads
A : Dict =activation_function
A : List[Any] =dropout
A : str =attention_dropout
A : List[Any] =activation_dropout
A : List[Any] =layerdrop
A : List[Any] =init_std
A : Union[str, Any] =scale_embedding # scale factor will be sqrt(d_model) if True
A : List[str] =use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
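# Note (editorial): the defaults above (d_model=1024, 24 layers, 16 attention
# heads, ffn_dim=4096, vocab_size=256008) mirror the facebook/xglm-564M
# checkpoint referenced in the archive map at the top of the module.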
| 661
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node: stores the data, a parent pointer, and a rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure with union by rank and path compression
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with data as its only member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set data belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for the union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge two disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # map from a node to its neighbours (with edge weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node only if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an undirected edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> "GraphUndirectedWeighted[T]":
        # collect the edges, deduplicated, in ascending order of weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation: keep an edge only if it connects two different trees
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
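# Usage sketch (illustrative graph): for the triangle a-b (1), b-c (2), a-c (3),
# Kruskal keeps the two cheapest edges and skips (a, c, 3), whose endpoints are
# already connected through the growing forest:
#
#   g = GraphUndirectedWeighted[str]()
#   g.add_edge("a", "b", 1)
#   g.add_edge("b", "c", 2)
#   g.add_edge("a", "c", 3)
#   mst = g.kruskal()
#   "c" in mst.connections["a"]  # -> False; only a-b and b-c survive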
| 102
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
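# Editorial note: this is the lazy-import pattern used across this codebase:
# sys.modules[__name__] is swapped for a _LazyModule, so the torch-backed
# classes listed in _import_structure are only imported on first attribute
# access, while the TYPE_CHECKING branch keeps static type checkers accurate.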
| 413
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 702
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
lowerCamelCase : Dict = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Union[str, Any] = image_size
lowerCamelCase : Optional[int] = patch_size
lowerCamelCase : Any = num_channels
lowerCamelCase : Any = embed_dim
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : List[Any] = depths
lowerCamelCase : Tuple = num_heads
lowerCamelCase : List[Any] = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : str = qkv_bias
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Tuple = drop_path_rate
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Tuple = use_absolute_embeddings
lowerCamelCase : List[str] = patch_norm
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : str = initializer_range
lowerCamelCase : Tuple = is_training
lowerCamelCase : int = scope
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : str = encoder_stride
lowerCamelCase : List[str] = out_features
lowerCamelCase : Optional[int] = out_indices
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def a__ ( self: List[Any] )-> Optional[int]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = model(__a )
lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase : Dict = None
lowerCamelCase : Dict = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
lowerCamelCase : Optional[Any] = self.type_sequence_label_size
lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self: int )-> Optional[int]:
lowerCamelCase : str = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Tuple =False
snake_case__ : Dict =False
snake_case__ : Dict =False
snake_case__ : Tuple =False
snake_case__ : Optional[int] =False
def a__ ( self: Union[str, Any] )-> Optional[int]:
lowerCamelCase : List[str] = FocalNetModelTester(self )
lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )
def a__ ( self: List[str] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: List[str] )-> Union[str, Any]:
return
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[Any] )-> Dict:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : int = model_class(__a )
lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[str] = outputs.hidden_states
lowerCamelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self: Any )-> Any:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : List[str] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : str = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def a__ ( self: Optional[int] )-> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> Any:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = _config_zero_init(__a )
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Optional[int] )-> Optional[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a )
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
snake_case__ : Optional[int] =FocalNetConfig
snake_case__ : str =False
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : str = FocalNetModelTester(self )
| 42
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase__: Any = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageToTextPipeline(Pipeline):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
if prompt is not None:
SCREAMING_SNAKE_CASE_ : List[str] = prompt
if generate_kwargs is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
SCREAMING_SNAKE_CASE_ : int = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
SCREAMING_SNAKE_CASE_ : Dict = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ):
return super().__call__(UpperCAmelCase__ , **UpperCAmelCase__ )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase=None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(UpperCAmelCase__ )
if prompt is not None:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(
F'Received an invalid text input, got - {type(UpperCAmelCase__ )} - but expected a single string. '
'Note also that one single text can be provided for conditional image to text generation.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model.config.model_type
if model_type == "git":
SCREAMING_SNAKE_CASE_ : str = self.image_processor(images=UpperCAmelCase__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer(text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ).input_ids
SCREAMING_SNAKE_CASE_ : List[str] = [self.tokenizer.cls_token_id] + input_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processor(images=UpperCAmelCase__ , header_text=UpperCAmelCase__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processor(images=UpperCAmelCase__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer(UpperCAmelCase__ , return_tensors=self.framework )
model_inputs.update(UpperCAmelCase__ )
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation' )
else:
SCREAMING_SNAKE_CASE_ : int = self.image_processor(images=UpperCAmelCase__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
return model_inputs
def __A ( self , __lowerCAmelCase , __lowerCAmelCase=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , UpperCAmelCase__ )
and all(x is None for x in model_inputs['input_ids'] )
):
SCREAMING_SNAKE_CASE_ : Dict = None
if generate_kwargs is None:
SCREAMING_SNAKE_CASE_ : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
SCREAMING_SNAKE_CASE_ : str = model_inputs.pop(self.model.main_input_name )
SCREAMING_SNAKE_CASE_ : int = self.model.generate(UpperCAmelCase__ , **UpperCAmelCase__ , **UpperCAmelCase__ )
return model_outputs
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for output_ids in model_outputs:
SCREAMING_SNAKE_CASE_ : List[str] = {
'''generated_text''': self.tokenizer.decode(
UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , )
}
records.append(UpperCAmelCase__ )
return records
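# Hedged usage sketch (the checkpoint name is illustrative; "image-to-text" is
# the registered task string for this pipeline in transformers):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="some-org/some-captioning-model")
#   captioner("photo.jpg")
#   # -> [{"generated_text": "..."}]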
| 345
|
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'
class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
"""simple docstring"""
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_token2json(self):
"""simple docstring"""
        expected_json = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
        sequence = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
| 598
| 0
|
import warnings
warnings.warn(
    '''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 189
|
def solution(limit: int = 1000000) -> int:
    # sieve of Eratosthenes over the odd numbers
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # Euler's totient via the product formula phi(n) = n * prod(1 - 1/p)
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"""{solution() = }""")
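# Worked example (editorial sketch): for limit = 8 the totients are
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so solution(8) returns 1+2+2+4+2+6+4 = 21,
# the number of reduced proper fractions with denominator <= 8 (Project Euler 72).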
| 189
| 1
|
"""simple docstring"""
__lowerCAmelCase : List[Any] =[
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 359
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] ={
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class MraConfig(PretrainedConfig):
snake_case__ : Optional[int] = 'mra'
def __init__( self , __lowerCAmelCase=5_0265 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=1 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-5 , __lowerCAmelCase="absolute" , __lowerCAmelCase=4 , __lowerCAmelCase="full" , __lowerCAmelCase=0 , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = type_vocab_size
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = block_per_row
lowercase = approx_mode
lowercase = initial_prior_first_n_blocks
lowercase = initial_prior_diagonal_n_blocks
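# Illustrative usage of the config above (added; the override values here are
# arbitrary, chosen only to show that keyword defaults can be replaced).
config = MraConfig(hidden_size=256, num_hidden_layers=4)
assert config.hidden_size == 256 and config.approx_mode == "full"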
| 359
| 1
|
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 713
|
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_snake_case : Union[str, Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 493
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
lowercase : Any = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
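# Quick check of the helper above (added): with the default scale_factor of 8,
# a 512x512 request maps to 64x64, and non-multiples of 64 round up.
assert downscale_height_and_width(512, 512) == (64, 64)
assert downscale_height_and_width(520, 512) == (72, 64)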
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
"""simple docstring"""
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """simple docstring"""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """simple docstring"""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(lowercase)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], hint: torch.FloatTensor, height: int = 5_1_2, width: int = 5_1_2, num_inference_steps: int = 1_0_0, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        """simple docstring"""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 649
|
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
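# Companion sketch (added for illustration, not in the original file): the
# same tuple-count trick inverts cleanly into a NAND gate; the name
# `nand_gate` is ours.
def nand_gate(input_1: int, input_2: int) -> int:
    return int(not and_gate(input_1, input_2))

assert nand_gate(1, 1) == 0
assert nand_gate(0, 1) == 1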
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 649
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar('T')
U = TypeVar('U')
class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None) -> None:
        """simple docstring"""
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self ) -> str:
"""simple docstring"""
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        """simple docstring"""
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__(self) -> str:
        """simple docstring"""
        rep = ['''DoubleLinkedList''']
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)
    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """simple docstring"""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        node.next = self.rear
        self.rear.prev = node
    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """simple docstring"""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__(self, capacity: int) -> None:
        """simple docstring"""
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ) -> str:
"""simple docstring"""
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
    def __contains__(self, key: T) -> bool:
"""simple docstring"""
return key in self.cache
    def get(self, key: T) -> U | None:
        """simple docstring"""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None
    def put(self, key: T, value: U) -> None:
        """simple docstring"""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
@classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """simple docstring"""
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper, '''cache_info''', cache_info)  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
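# Illustrative use of the decorator classmethod above (added): memoizing a
# recursive unary function; `fib` is our example name, not from the source.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

assert fib(20) == 6765
assert fib.cache_info().hits > 0  # repeated subproblems were served from the cache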
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 5_0  # max width of layer names
qname_width = 7_0  # max width of quantizer names
def add_arguments(parser):
    """simple docstring"""
    group = parser.add_argument_group('''quant_trainer arguments''')
group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' )
group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' )
group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' )
group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' )
group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' )
group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' )
group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def set_default_quantizers(args):
    """simple docstring"""
    if args.calibrator == "max":
        calib_method = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''')
        calib_method = '''histogram'''
    elif args.calibrator == "mse":
        calib_method = '''histogram'''
    else:
        raise ValueError(f'''Invalid calibrator {args.calibrator}''')
    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """simple docstring"""
    logger.info('''Configuring Model for Quantization''')
    logger.info(f'''using quantization package {pytorch_quantization.__file__}''')
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ['''embeddings'''], which='''weight''', _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [''''''], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [R'''layer.\d+.''' + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [R'''layer.\d+.''' + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model)
def enable_calibration(model):
"""simple docstring"""
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'''{name:80}: {module}''' )
def finish_calibration(model, args):
"""simple docstring"""
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """simple docstring"""
    def fusea(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, '''_amax'''):
                print(''' WARNING: NO AMAX BUFFER''')
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu(model, maxval):
    """simple docstring"""
    for name, mod in model.named_modules():
        if name.endswith('''.output.dense''') and not name.endswith('''attention.output.dense'''):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def expand_amax(model):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod, '''_weight_quantizer''') and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights(model):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod, '''_weight_quantizer'''):
            if not hasattr(mod._weight_quantizer, '''_amax'''):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''')
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''')
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """simple docstring"""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, '''weight'''):
            continue
        name_width = max(name_width, len(name))
    for name, mod in model.named_modules():
        input_q = getattr(mod, '''_input_quantizer''', None)
        weight_q = getattr(mod, '''_weight_quantizer''', None)
        if not hasattr(mod, '''weight'''):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f'''Act:{input_q.extra_repr()}'''
        wgt_str = f'''Wgt:{weight_q.extra_repr()}'''
        s = f'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f'''{name:{name_width}} {act_str}''')
logger.info(f'''{" ":{name_width}} {wgt_str}''' )
def print_quant_summary(model):
    """simple docstring"""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def set_quantizer(name, mod, quantizer, k, v):
    """simple docstring"""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f'''{name} has no {quantizer}''')
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
if which in ["weight", "both"]:
set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info(lowerCAmelCase_ )
def set_quantizer_by_name(model, names, **kwargs):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod, '''_input_quantizer''') or hasattr(mod, '''_weight_quantizer'''):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith('''_quantizer'''):
            for n in names:
                if re.search(n, name):
                    s = f'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += f''' {k}={v}'''
                        setattr(mod, k, v)
                    logger.info(s)
| 47
| 0
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print("""\n""".join(upper_files) + """\n""")
lowerCAmelCase__ = [file for file in filepaths if """ """ in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print("""\n""".join(space_files) + """\n""")
lowerCAmelCase__ = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print("""\n""".join(hyphen_files) + """\n""")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print("""\n""".join(nodir_files) + """\n""")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 514
|
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    '''simple docstring'''
    factor = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
    def contrast(c: int) -> int:
        return int(1_2_8 + factor * (c - 1_2_8))
    return img.point(contrast)
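# A small numeric check of the mapping above (added): mid-gray (128) is a
# fixed point of the contrast curve at any level, and level 170 steepens the
# slope around it to roughly 4.85.
_factor_170 = (259 * (170 + 255)) / (255 * (259 - 170))
assert int(128 + _factor_170 * (128 - 128)) == 128
assert 4.8 < _factor_170 < 4.9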
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 1_7_0)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 514
| 1
|
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 9_6 for elem in plain]
def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 9_6) for elem in encoded)
def main() -> None:
    encoded = encode(input('''-> ''').strip().lower())
    print('''Encoded: ''', encoded)
    print('''Decoded:''', decode(encoded))
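# Round-trip check (added): letters map to their 1-26 alphabet positions and
# back, so decode inverts encode exactly.
assert encode("abc") == [1, 2, 3]
assert decode(encode("hello")) == "hello"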
if __name__ == "__main__":
main()
| 431
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config(self):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_parallelism(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip('''The model doesn\'t support left padding''')  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained('''ctrl''')
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11_859, 0, 1_611, 8]], dtype=torch.long, device=torch_device)  # Legal the president is
        expected_output_ids = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 431
| 1
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    config_parameters_to_change = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
    key_parameters_to_change = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
    subfolder = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
        text = reader.read()
    config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
    if has_file(args.repo_path, '''config.json'''):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNet2DModel
        model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
                config[value] = config[key]
                del config[key]
        config['''down_block_types'''] = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
        config['''up_block_types'''] = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
        new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('''.''')[0] == key:
                    new_state_dict['''.'''.join([new_key] + param_key.split('''.''')[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 392
|
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
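# Quick check of pancake_sort above (added): repeatedly flipping prefixes
# moves the current maximum to the end, so the result is fully sorted.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]
assert pancake_sort([]) == []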
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 392
| 1
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ['''model.decoder.embed_positions.weights''']
def rename_keys(name):
    if "emb" in name:
        name = name.replace('''emb''', '''model.decoder.embed_tokens''')
    if "transformer" in name:
        name = name.replace('''transformer''', '''model.decoder''')
    if "cross_attention" in name:
        name = name.replace('''cross_attention''', '''encoder_attn''')
    if "linear1" in name:
        name = name.replace('''linear1''', '''fc1''')
    if "linear2" in name:
        name = name.replace('''linear2''', '''fc2''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''self_attn_layer_norm''')
    if "norm_cross" in name:
        name = name.replace('''norm_cross''', '''encoder_attn_layer_norm''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''final_layer_norm''')
    if "out_norm" in name:
        name = name.replace('''out_norm''', '''model.decoder.layer_norm''')
    if "linears" in name:
        name = name.replace('''linears''', '''lm_heads''')
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('''condition_provider.conditioners.description.output_proj''', '''enc_to_dec_proj''')
    return name
def rename_state_dict(state_dict, hidden_size):
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint):
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.')
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)
    text_encoder = T5EncoderModel.from_pretrained('''t5-base''')
    audio_encoder = EncodecModel.from_pretrained('''facebook/encodec_32khz''')
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(('''text_encoder''', '''audio_encoder''')) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f'Missing key(s) in state_dict: {missing_keys}')
    if len(unexpected_keys) > 0:
        raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}')
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('''Incorrect shape for logits''')
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('''t5-base''')
    feature_extractor = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''', padding_side='''left''')
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}')
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f'Pushing model {checkpoint} to {repo_id}')
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 717
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None
    def __post_init__(self):
        '''simple docstring'''
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
def __repr__( self : Tuple):
'''simple docstring'''
return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
    def tuple(self):
'''simple docstring'''
return self.major, self.minor, self.patch
    def _validate_operand(self, other):
        '''simple docstring'''
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f'{other} (type {type(other)}) cannot be compared to version.')
    def __eq__(self, other):
        '''simple docstring'''
        try:
            other = self._validate_operand(other)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
    def __lt__(self, other):
        '''simple docstring'''
        other = self._validate_operand(other)
return self.tuple < other.tuple
def __hash__( self : Optional[Any]):
'''simple docstring'''
return hash(_version_tuple_to_str(self.tuple))
@classmethod
    def from_dict(cls, dic):
        '''simple docstring'''
        field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
    def _to_yaml_string(self):
'''simple docstring'''
return self.version_str
def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.')
    return tuple(int(v) for v in [res.group('''major'''), res.group('''minor'''), res.group('''patch''')])
def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 60
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 688
|
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    """simple docstring"""
    def __init__(self):
        '''simple docstring'''
        self.initialized = False
    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        '''simple docstring'''
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True
    def init_retrieval(self):
        '''simple docstring'''
        self.retriever.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    """simple docstring"""
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        '''simple docstring'''
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                '''When using Ray for distributed fine-tuning, '''
                '''you\'ll need to provide the paths instead, '''
                '''as the dataset and the index are loaded '''
                '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''')
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])
    def init_retrieval(self):
        '''simple docstring'''
        logger.info('''initializing retrieval''')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)
    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        config = kwargs.pop('''config''', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = '''custom'''
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
| 688
| 1
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all prime numbers below max_number, using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """
    Count hybrid-integers p**q * q**p (p and q distinct primes) that do not
    exceed base**degree, by comparing logarithms:
    p**q * q**p <= base**degree  iff  q * log2(p) + p * log2(q) <= degree * log2(base).
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
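def _brute_force_check(base: int = 10, degree: int = 10) -> None:
    # Illustrative cross-check (not part of the original solution): for small
    # bounds, the log-space two-pointer count above can be verified against a
    # direct enumeration of p**q * q**p over prime pairs p < q.
    limit = base**degree
    primes = calculate_prime_numbers(int(degree * log2(base)) + 1)
    direct = sum(
        1
        for i, p in enumerate(primes)
        for q in primes[i + 1 :]
        if p**q * q**p <= limit
    )
    assert direct == solution(base, degree)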
if __name__ == "__main__":
print(f"""{solution() = }""")
| 111
|
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10 * sqrt(5)) * edge**2."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: ((15 + 7 * sqrt(5)) / 4) * edge**3."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
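def _dodecahedron_example() -> None:
    # Worked example (illustrative, not part of the original module): for a
    # unit edge the closed forms above give surface area
    # 3 * sqrt(25 + 10 * sqrt(5)) ~= 20.6457 and volume
    # (15 + 7 * sqrt(5)) / 4 ~= 7.6631.
    assert abs(dodecahedron_surface_area(1) - 20.6457288) < 1e-6
    assert abs(dodecahedron_volume(1) - 7.6631190) < 1e-6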
| 111
| 1
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_UpperCamelCase = logging.getLogger(__name__)
def save_model(model, dirpath):
    # Save the pruned model, removing any stale checkpoint files first.
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution, with 0 * log(0) taken as 0."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
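def _entropy_sanity_check():
    # Illustrative check (not part of the original script): the entropy of a
    # uniform distribution over four outcomes is ln(4) ~= 1.3863.
    uniform = torch.full((4,), 0.25)
    assert torch.isclose(entropy(uniform), torch.log(torch.tensor(4.0)))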
def print_2d_tensor(tensor):
    """Print a 2D tensor (layers x heads) as a tab-separated table in the logs."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """
    Compute head attention entropy and head importance scores according to
    http://arxiv.org/abs/1905.10650 (importance = accumulated |gradient of the
    loss w.r.t. the head mask|).
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
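def _layerwise_normalization_example():
    # Illustrative only (not in the original script): the layer-wise L2
    # normalization applied above, shown on a tiny 2-layer x 2-head matrix.
    # Both rows normalize to [0.6, 0.8]: importance is compared within a layer,
    # not across layers.
    scores = torch.tensor([[3.0, 4.0], [6.0, 8.0]])
    norm_by_layer = torch.pow(torch.pow(scores, 2).sum(-1), 1 / 2)
    normalized = scores / (norm_by_layer.unsqueeze(-1) + 1e-20)
    assert torch.allclose(normalized, torch.tensor([[0.6, 0.8], [0.6, 0.8]]))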
def mask_heads(args, model, eval_dataloader):
    """
    Iteratively zero out the least important heads (per the importance scores)
    until the LM score drops below args.masking_threshold * original score.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """
    Actually remove the masked head weights from the model, then compare score,
    parameter count, and timing against the masked-but-unpruned model.
    """
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
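def _heads_to_prune_format_example():
    # Illustrative only (not in the original script): model.prune_heads expects
    # {layer_index: [head_indices]}. With head 1 of layer 0 and heads 0 and 2
    # of layer 1 zeroed out in the mask:
    head_mask = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    assert heads_to_prune == {0: 1, 1: [0, 2]}  # a single head squeezes to a bare int...
    for layer, heads in heads_to_prune.items():
        if isinstance(heads, int):
            heads_to_prune[layer] = [heads]  # ...which is why prune_heads() has the fixup loop
    assert heads_to_prune == {0: [1], 1: [0, 2]}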
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 341
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]
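def _scatter_decomposition_example():
    # Illustrative only (not part of the original module): on a toy two-feature
    # dataset with two classes, the within-class and between-class scatter
    # matrices computed above sum to the scatter of the full dataset (all three
    # share the same 1/N weighting), i.e. S_T = S_W + S_B.
    features = np.array([[1.0, 2.0, 8.0, 9.0], [1.0, 2.0, 8.0, 9.0]])
    labels = np.array([0, 0, 1, 1])
    s_w = covariance_within_classes(features, labels, 2)
    s_b = covariance_between_classes(features, labels, 2)
    centered = features - column_reshape(features.mean(1))
    s_t = np.dot(centered, centered.T) / features.shape[1]
    assert np.allclose(s_w + s_b, s_t)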
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto its first `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first ones
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto `dimensions` linear discriminants."""
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
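# Example usage (illustrative, not part of the original module): project the
# 3-feature dummy dataset from the tests above down to two principal
# components.
#
#     features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
#     reduced = principal_component_analysis(features, dimensions=2)
#     print(reduced.shape)  # (2, 5)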
| 652
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
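# Usage sketch (illustrative, not part of the test suite): the register /
# auto-load round trip exercised above, condensed. CustomConfig and
# CustomImageProcessor are the test helpers imported at the top of the file;
# the local path is an assumption for the example.
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#     processor = CustomImageProcessor()
#     processor.save_pretrained("./custom-processor")
#     processor = AutoImageProcessor.from_pretrained("./custom-processor")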
| 588
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Args:
        predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
            The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    """
    A prior transformer that predicts CLIP image embeddings from CLIP text
    embeddings through a denoising diffusion process.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings: int = 77,
        additional_embeddings: int = 4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
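    # Illustrative note (not in the original module): `triu_(1)` keeps the
    # -10000.0 fill strictly above the diagonal, so position i can only attend
    # to positions <= i once the mask is added to the attention logits. For
    # three tokens the additive mask is:
    #
    #     [[0., -10000., -10000.],
    #      [0.,      0., -10000.],
    #      [0.,      0.,      0.]]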
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return a dictionary of all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
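# Minimal usage sketch (illustrative, not part of the original module; the
# small configuration values are assumptions chosen so the example runs fast):
#
#     model = PriorTransformer(num_attention_heads=2, attention_head_dim=8,
#                              num_layers=2, embedding_dim=16,
#                              num_embeddings=4, additional_embeddings=4)
#     hidden_states = torch.randn(1, 16)
#     proj_embedding = torch.randn(1, 16)
#     encoder_hidden_states = torch.randn(1, 4, 16)
#     out = model(hidden_states, timestep=1, proj_embedding=proj_embedding,
#                 encoder_hidden_states=encoder_hidden_states)
#     print(out.predicted_image_embedding.shape)  # torch.Size([1, 16])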
| 588
| 1
|
from ..utils import DummyObject, requires_backends


# Auto-generated dummy module: it defines one placeholder per torch-backed
# class, so that importing the library without torch installed fails lazily,
# with an informative error, only when such an object is actually used. In the
# original file the definition below is repeated verbatim once per class under
# that class's own name; a single representative is kept here.
class DummyTorchClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


# Module-level helper functions follow the same pattern, one per torch-backed
# function in the original file:
def dummy_torch_function(*args, **kwargs):
    requires_backends(dummy_torch_function, ["torch"])
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
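# Usage sketch (placeholder names, see the note above): without torch installed,
#   DummyTorchObject1.from_pretrained("some/checkpoint")
# raises an ImportError from `requires_backends` telling the user to install torch.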
| 184
|
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
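# Usage sketch (hypothetical data_dir containing train.txt/dev.txt/test.txt):
#   task = NER()
#   examples = task.read_examples_from_file("data/conll2003", Split.train)
#   labels = task.get_labels(None)  # falls back to the default CoNLL-2003 label set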
| 184
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
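# Usage sketch mirroring the tests above: given low-resolution masks from SamModel,
# `processor.post_process_masks(masks, original_sizes, reshaped_input_sizes)` resizes
# each mask back to its source image resolution, e.g. the (1, 3, 5, 5) dummies above
# become (1, 3, 1764, 2646) for the sizes used in these tests.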
| 50
|
from ..utils import DummyObject, requires_backends
# NOTE: "MidiProcessor" is assumed to be the original class name here; the metaclass
# and backend pattern are grounded in the `DummyObject` import above.
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 50
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
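# With the structure above, `import transformers.models.perceiver` stays cheap: the
# module object is swapped for a _LazyModule, and heavy submodules such as
# modeling_perceiver are only imported when an attribute like `PerceiverModel` is
# first accessed (standard _LazyModule behavior).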
| 307
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class SimpleImageProcessor(BaseImageProcessor):
    # NOTE: the original model-specific class name was lost in obfuscation;
    # "SimpleImageProcessor" is a placeholder. The behavior below is reconstructed
    # from the visible calls: resize -> center crop -> rescale -> normalize.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Optional[Dict[str, int]] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        do_normalize: Optional[bool] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        rescale_factor: Optional[float] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
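# Usage sketch (class name is a placeholder, see the note above):
#   ip = SimpleImageProcessor()
#   batch = ip(images=pil_image, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224) with the 224x224 defaults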
| 249
| 0
|
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# NOTE: each tensor below originally filled an entry of `results` keyed by a model id
# (the loop at the bottom looks keys up as e.g. "google_ddpm_cifar10_32"); those keys
# were lost in obfuscation, so the reference-logit assignments are left as found.
# fmt: off
lowerCamelCase_ : Tuple = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowerCamelCase_ : List[str] = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowerCamelCase_ : Dict = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowerCamelCase_ : Optional[Any] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowerCamelCase_ : str = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowerCamelCase_ : Optional[int] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowerCamelCase_ : Optional[int] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowerCamelCase_ : Union[str, Any] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowerCamelCase_ : int = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowerCamelCase_ : Any = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowerCamelCase_ : Optional[Any] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowerCamelCase_ : List[str] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowerCamelCase_ : str = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowerCamelCase_ : Optional[Any] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowerCamelCase_ : Optional[int] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 710
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 345
| 0
|
"""simple docstring"""
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
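# Worked mini-example (assumed 4x4 input):
#   largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
# compares column products (best: 4*8*12*16 = 6144), row products
# (best: 13*14*15*16 = 43680) and both diagonals, and returns 43680.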
| 499
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"


def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
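# Example (values illustrative): http_user_agent({"file_type": "model"}) yields
# something like "diffusers/0.17.0; python/3.10.12; session_id/<hex>; file_type/model",
# with extra "torch/..."/"jax/..." tags appended when those backends are importable.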
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """
    Extracts the commit hash from a resolved filename toward a cache file.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")


def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
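# Example: _add_variant("diffusion_pytorch_model.bin", "fp16")
# returns "diffusion_pytorch_model.fp16.bin"; with variant=None the name is unchanged.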
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
| 499
| 1
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __UpperCAmelCase ( cls):
lowerCamelCase__ = TOKEN
HfFolder.save_token(UpperCamelCase)
@classmethod
def __UpperCAmelCase ( cls):
try:
delete_repo(token=cls._token , repo_id="test-image-processor")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor")
except HTTPError:
pass
def __UpperCAmelCase ( self):
lowerCamelCase__ = ViTImageProcessor.from_pretrained(UpperCamelCase)
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token)
lowerCamelCase__ = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""")
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCamelCase , repo_id="test-image-processor" , push_to_hub=UpperCamelCase , use_auth_token=self._token)
lowerCamelCase__ = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""")
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase))
def __UpperCAmelCase ( self):
lowerCamelCase__ = ViTImageProcessor.from_pretrained(UpperCamelCase)
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token)
lowerCamelCase__ = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=UpperCamelCase , use_auth_token=self._token)
lowerCamelCase__ = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase))
def __UpperCAmelCase ( self):
CustomImageProcessor.register_for_auto_class()
lowerCamelCase__ = CustomImageProcessor.from_pretrained(UpperCamelCase)
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
lowerCamelCase__ = AutoImageProcessor.from_pretrained(
f"""{USER}/test-dynamic-image-processor""" , trust_remote_code=UpperCamelCase)
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor")
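# --- Usage sketch (added for illustration, not part of the test suite) ---
# A minimal version of the round trip the staging tests above exercise. The
# repo ids "my-image-processor" and "my-user/my-image-processor" are
# placeholders, and a valid Hub token must already be stored locally
# (e.g. via `huggingface-cli login`).
if __name__ == "__main__":
    from transformers import ViTImageProcessor

    processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
    processor.push_to_hub("my-image-processor")  # pushes to your user namespace
    reloaded = ViTImageProcessor.from_pretrained("my-user/my-image-processor")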
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra 5-way classification head on the pooled output."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        """Cross entropy over integer labels, optionally reduced (e.g. with `jnp.mean`)."""
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        flat_params = traverse_util.flatten_dict(params)
        # do not apply weight decay to biases or LayerNorm scales
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in flat_params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
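# --- Illustrative driver (added sketch, not part of the original module) ---
# A minimal sketch of how the pieces above fit together. The jsonl loading and
# the pad token id are placeholders: the real project prepares Natural
# Questions data with the fields the collator expects ("input_ids",
# "start_token", "end_token", "category"), and the wandb project name below is
# an arbitrary choice.
if __name__ == "__main__":
    from datasets import load_dataset  # assumption: `datasets` is installed

    args = Args()
    model = FlaxBigBirdForNaturalQuestions.from_pretrained(args.model_id)
    data_collator = DataCollator(pad_id=0, max_length=4096)  # pad_id=0 is a stand-in

    tr_dataset = load_dataset("json", data_files=args.tr_data_path)["train"]
    val_dataset = load_dataset("json", data_files=args.val_data_path)["train"]
    num_train_steps = len(tr_dataset) // args.batch_size * args.max_epochs

    tx, lr = build_tx(
        lr=args.lr,
        init_lr=args.init_lr,
        warmup_steps=args.warmup_steps,
        num_train_steps=num_train_steps,
        weight_decay=args.weight_decay,
    )
    wandb.init(project=args.save_dir)
    trainer = Trainer(
        args=args,
        data_collator=data_collator,
        train_step_fn=train_step,
        val_step_fn=val_step,
        model_save_fn=model.save_pretrained,
        logger=wandb,
        scheduler_fn=lr,
    )
    state = trainer.create_state(model, tx, num_train_steps)
    trainer.train(state, tr_dataset, val_dataset)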
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # Find every job card listed on the results page
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    r"""
    Constructs a ConvNeXT image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
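# --- Usage sketch (added for illustration) ---
# A minimal, self-contained exercise of the processor defined above. The random
# array stands in for a real image; with `shortest_edge: 384` the resize step
# takes the warp (no-crop) branch, so the output is 384x384.
if __name__ == "__main__":
    image_processor = ConvNextImageProcessor(size={"shortest_edge": 384})
    fake_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    inputs = image_processor(images=fake_image, return_tensors="np")
    print(inputs["pixel_values"].shape)  # (1, 3, 384, 384)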
def euclidean_distance_sqr(point1, point2) -> float:
    """Squared euclidean distance; enough for comparisons, avoids the sqrt."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0) -> list:
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")) -> float:
    """Brute force: compare every pair of points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")) -> float:
    """Within the strip, only the next six points (sorted on y) can be closer."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts) -> float:
    # base case: brute force on small inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion: solve the left and right halves independently
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # collect points within `closest_pair_dis` of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts) -> float:
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
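    # Added cross-check sketch (illustrative): on random inputs, the
    # divide-and-conquer answer must agree with plain brute force, assuming
    # `dis_between_closest_pair` above is correct.
    import random

    random_points = [(random.randint(0, 100), random.randint(0, 100)) for _ in range(50)]
    brute = dis_between_closest_pair(random_points, len(random_points)) ** 0.5
    fast = closest_pair_of_points(random_points, len(random_points))
    assert abs(brute - fast) < 1e-9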
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
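# --- Usage sketch (added for illustration) ---
# A minimal way to exercise the tool above. The one-second silent waveform is a
# stand-in for real 16 kHz mono audio, and the Whisper checkpoint is downloaded
# lazily on the first call.
if __name__ == "__main__":
    import numpy as np

    tool = SpeechToTextTool()
    waveform = np.zeros(16_000, dtype=np.float32)  # placeholder audio
    print(tool(waveform))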
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; skipped unless RUN_SLOW is set"
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """Keeps a temporary directory open for the class, wiping its contents before each test."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """Resets the accelerator state singletons at the end of every test."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """Lets tests register mocks that are started now and stopped automatically on cleanup."""

    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary string representation (prefix "0b").

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(40)
    '0b101000'
    >>> decimal_to_binary(-40)
    '-0b101000'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, (h - t))

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, (h))

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
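    # Added cross-check sketch (illustrative): stooge sort must agree with the
    # built-in `sorted` on small random lists (it runs in ~O(n^2.71), so keep n tiny).
    import random

    for _ in range(100):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 12))]
        assert stooge_sort(list(data)) == sorted(data)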
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
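# --- Usage sketch (added for illustration) ---
# A minimal example of the two ways to build the config above: with its
# defaults (Swin backbone + DETR decoder), or from explicit sub-configs.
if __name__ == "__main__":
    default_config = MaskFormerConfig()
    custom_config = MaskFormerConfig.from_backbone_and_decoder_configs(
        backbone_config=SwinConfig(), decoder_config=DetrConfig()
    )
    print(default_config.backbone_config.model_type, custom_config.decoder_config.model_type)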
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttnProcessor, Dict[str, AttnProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
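# --- Usage sketch (added for illustration) ---
# A tiny encode/decode round trip through the autoencoder above with its
# default config (one block, four latent channels); the input size is
# illustrative only.
if __name__ == "__main__":
    vae = AutoencoderKL()
    sample = torch.randn(1, 3, 32, 32)
    posterior = vae.encode(sample).latent_dist
    latents = posterior.sample()
    reconstruction = vae.decode(latents).sample
    print(latents.shape, reconstruction.shape)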
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 text decoder with a learned prefix projection, used for captioning."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)
        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
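

# The beam re-ranking above keeps cumulative log-probabilities in `scores` and
# divides by `seq_lengths`, so short beams that stop early are not unfairly
# favored over longer ones. A minimal, self-contained sketch of that
# length-normalized ranking (illustrative values, not tied to any model):
def _rank_beams_example():
    scores = torch.tensor([-4.0, -4.5])  # cumulative log-probs per beam
    seq_lengths = torch.tensor([2.0, 3.0])  # tokens generated per beam
    normalized = scores / seq_lengths  # -> tensor([-2.0, -1.5])
    order = normalized.argsort(descending=True)  # beam 1 ranks first despite the lower sum
    return order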
| 254
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes must equal the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
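

# A worked example for distribute_coins using the TreeNode layout above:
# a root holding 3 coins with two empty children needs one move per child.
def _distribute_coins_example() -> None:
    tree = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(tree) == 2  # one coin moved left, one moved right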
if __name__ == "__main__":
import doctest
doctest.testmod()
| 212
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)
    info["`Accelerate` configs"] = accelerate_config
    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
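

# A small, self-contained check of the parser wiring above (the config path is
# purely illustrative):
def _parse_args_example():
    parser = env_command_parser()
    args = parser.parse_args(["--config_file", "my_config.yaml"])
    assert args.config_file == "my_config.yaml"
    return args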
if __name__ == "__main__":
raise SystemExit(main())
| 683
| 0
|
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(1_0**6))
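
# Cross-check of the segmented sieve against plain trial division, kept in a
# function so it does not run on import:
def _sieve_sanity_check() -> None:
    def is_prime(k: int) -> bool:
        return k >= 2 and all(k % d != 0 for d in range(2, int(math.sqrt(k)) + 1))

    assert sieve(100) == [p for p in range(2, 101) if is_prime(p)]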
| 594
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    """Fast Blenderbot tokenizer, backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
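

# A hedged usage sketch (requires network access to the Hub; the checkpoint name
# comes from the pretrained map above):
def _blenderbot_tokenizer_example():
    tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    input_ids = tokenizer(" Hello, how are you?")["input_ids"]
    return input_ids  # ends with the tokenizer's eos_token_id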
| 594
| 1
|
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase ( snake_case_ , snake_case_ , snake_case_ ):
_lowercase: Dict = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self : Optional[int] , __snake_case : int , __snake_case : int , __snake_case : Optional[int] = None , __snake_case : int = 5_02_57 , __snake_case : int = 10_24 , __snake_case : int = 7_68 , __snake_case : int = 12 , __snake_case : int = 12 , __snake_case : Optional[int] = None , __snake_case : str = "gelu_new" , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : float = 1E-5 , __snake_case : float = 0.02 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = False , __snake_case : bool = False , ) -> List[str]:
super().__init__()
_lowerCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_lowerCAmelCase = prefix_inner_dim
_lowerCAmelCase = prefix_hidden_dim
_lowerCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_lowerCAmelCase = (
nn.Linear(self.prefix_hidden_dim , __snake_case ) if self.prefix_hidden_dim is not None else nn.Identity()
)
        _lowerCAmelCase = GPT2Config(
vocab_size=__snake_case , n_positions=__snake_case , n_embd=__snake_case , n_layer=__snake_case , n_head=__snake_case , n_inner=__snake_case , activation_function=__snake_case , resid_pdrop=__snake_case , embd_pdrop=__snake_case , attn_pdrop=__snake_case , layer_norm_epsilon=__snake_case , initializer_range=__snake_case , scale_attn_weights=__snake_case , use_cache=__snake_case , scale_attn_by_inverse_layer_idx=__snake_case , reorder_and_upcast_attn=__snake_case , )
        _lowerCAmelCase = GPT2LMHeadModel(__snake_case )
def lowercase__ ( self : Optional[Any] , __snake_case : torch.Tensor , __snake_case : torch.Tensor , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None , ) -> Tuple:
_lowerCAmelCase = self.transformer.transformer.wte(__snake_case )
_lowerCAmelCase = self.encode_prefix(__snake_case )
_lowerCAmelCase = self.decode_prefix(__snake_case )
_lowerCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_lowerCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_lowerCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_lowerCAmelCase = self.transformer(inputs_embeds=__snake_case , labels=__snake_case , attention_mask=__snake_case )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowercase__ ( self : str , __snake_case : int , __snake_case : torch.device ) -> torch.Tensor:
        return torch.zeros(__snake_case , self.prefix_length , dtype=torch.int64 , device=__snake_case )
def lowercase__ ( self : Optional[int] , __snake_case : Optional[int] ) -> Optional[int]:
return self.encode_prefix(__snake_case )
@torch.no_grad()
def lowercase__ ( self : Any , __snake_case : str , __snake_case : List[str] , __snake_case : Any ) -> Tuple:
_lowerCAmelCase = torch.split(__snake_case , 1 , dim=0 )
_lowerCAmelCase = []
_lowerCAmelCase = []
for feature in features:
_lowerCAmelCase = self.decode_prefix(feature.to(__snake_case ) ) # back to the clip feature
# Only support beam search for now
_lowerCAmelCase , _lowerCAmelCase = self.generate_beam(
input_embeds=__snake_case , device=__snake_case , eos_token_id=__snake_case )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_lowerCAmelCase = torch.stack(__snake_case )
_lowerCAmelCase = torch.stack(__snake_case )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowercase__ ( self : str , __snake_case : List[str]=None , __snake_case : Dict=None , __snake_case : List[str]=None , __snake_case : int = 5 , __snake_case : int = 67 , __snake_case : float = 1.0 , __snake_case : Optional[int] = None , ) -> int:
_lowerCAmelCase = eos_token_id
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = torch.ones(__snake_case , device=__snake_case , dtype=torch.int )
_lowerCAmelCase = torch.zeros(__snake_case , device=__snake_case , dtype=torch.bool )
if input_embeds is not None:
_lowerCAmelCase = input_embeds
else:
_lowerCAmelCase = self.transformer.transformer.wte(__snake_case )
for i in range(__snake_case ):
_lowerCAmelCase = self.transformer(inputs_embeds=__snake_case )
_lowerCAmelCase = outputs.logits
_lowerCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_lowerCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_lowerCAmelCase , _lowerCAmelCase = logits.topk(__snake_case , -1 )
_lowerCAmelCase = generated.expand(__snake_case , *generated.shape[1:] )
_lowerCAmelCase , _lowerCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_lowerCAmelCase = next_tokens
else:
_lowerCAmelCase = tokens.expand(__snake_case , *tokens.shape[1:] )
_lowerCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_lowerCAmelCase = -float(np.inf )
_lowerCAmelCase = 0
_lowerCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_lowerCAmelCase = scores_sum / seq_lengths[:, None]
_lowerCAmelCase , _lowerCAmelCase = scores_sum_average.view(-1 ).topk(__snake_case , -1 )
_lowerCAmelCase = next_tokens // scores_sum.shape[1]
_lowerCAmelCase = seq_lengths[next_tokens_source]
_lowerCAmelCase = next_tokens % scores_sum.shape[1]
_lowerCAmelCase = next_tokens.unsqueeze(1 )
_lowerCAmelCase = tokens[next_tokens_source]
_lowerCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_lowerCAmelCase = generated[next_tokens_source]
_lowerCAmelCase = scores_sum_average * seq_lengths
_lowerCAmelCase = is_stopped[next_tokens_source]
_lowerCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_lowerCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_lowerCAmelCase = is_stopped + next_tokens.eq(__snake_case ).squeeze()
if is_stopped.all():
break
_lowerCAmelCase = scores / seq_lengths
        order = scores.argsort(descending=True)
# tokens tensors are already padded to max_seq_length
_lowerCAmelCase = [tokens[i] for i in order]
_lowerCAmelCase = torch.stack(__snake_case , dim=0 )
_lowerCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 207
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : int ={
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] =[
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
A__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 207
| 1
|
import qiskit
def quantum_entanglement(qubits: int = 2):
    """Prepare an entangled (GHZ-like) state on `qubits` qubits and measure it."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
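
# Expected behavior, for reference: the circuit prepares a GHZ state, so for
# three qubits only the all-zeros and all-ones bitstrings should appear
# (roughly 500 shots each out of 1000, up to sampling noise).
def _ghz_counts_example():
    counts = quantum_entanglement(3)
    assert set(counts) <= {"000", "111"}
    return counts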
| 715
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
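        # Worked example with this tester's defaults: image_size=10 and patch_size=2
        # give (10 // 2) ** 2 = 25 patches per frame, so with num_frames=2 the
        # sequence length is 2 * 25 + 1 = 51 tokens.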
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 313
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 305
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
lowercase : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
lowercase : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
lowercase : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowercase : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : GlueDataTrainingArguments
lowercase : str
lowercase : List[InputFeatures]
def __init__( self : str , SCREAMING_SNAKE_CASE__ : GlueDataTrainingArguments , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizerBase , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Union[str, Split] = Split.train , SCREAMING_SNAKE_CASE__ : Optional[str] = None , ) -> List[Any]:
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , SCREAMING_SNAKE_CASE__ , )
A : Any =args
A : Union[str, Any] =glue_processors[args.task_name]()
A : Union[str, Any] =glue_output_modes[args.task_name]
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
A : Any =Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
A : Tuple =os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
A : Optional[Any] =self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
A : Tuple =label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A : int =cached_features_file + '.lock'
with FileLock(SCREAMING_SNAKE_CASE__ ):
if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not args.overwrite_cache:
A : Optional[Any] =time.time()
A : str =torch.load(SCREAMING_SNAKE_CASE__ )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(f'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
A : int =self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
A : Dict =self.processor.get_test_examples(args.data_dir )
else:
A : Optional[Any] =self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
A : Optional[int] =examples[:limit_length]
A : int =glue_convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , max_length=args.max_seq_length , label_list=SCREAMING_SNAKE_CASE__ , output_mode=self.output_mode , )
A : List[Any] =time.time()
torch.save(self.features , SCREAMING_SNAKE_CASE__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ) -> Union[str, Any]:
return len(self.features )
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
return self.label_list
| 305
| 1
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Count the distinct terms generated by a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
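

# Worked example: with n = 5 the grid of a**b for 2 <= a, b <= 5 has 16 entries
# but only 15 distinct values, because 2**4 == 4**2 == 16 collapses into one
# set element.
def _solution_example() -> None:
    assert solution(5) == 15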
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 100
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""")
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
pass
@require_torch
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""")
lowercase_ = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.7_235, """label""": """cat""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.7_218, """label""": """remote""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.7_184, """label""": """couch""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.6_748, """label""": """remote""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6_656, """label""": """cat""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6_614, """label""": """couch""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6_456, """label""": """remote""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 6_7, """ymin""": 2_7_4, """xmax""": 9_3, """ymax""": 2_9_7}},
{"""score""": 0.6_419, """label""": """cat""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
] , )
lowercase_ = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
[
{"""score""": 0.7_235, """label""": """cat""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.7_218, """label""": """remote""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.7_184, """label""": """couch""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.6_748, """label""": """remote""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6_656, """label""": """cat""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6_614, """label""": """couch""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6_456, """label""": """remote""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 6_7, """ymin""": 2_7_4, """xmax""": 9_3, """ymax""": 2_9_7}},
{"""score""": 0.6_419, """label""": """cat""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
]
] , )
@require_torch
@slow
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = pipeline("""zero-shot-object-detection""")
lowercase_ = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
] , )
lowercase_ = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
[
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
],
[
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""")
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
pass
@require_torch
@slow
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = 0.2
lowercase_ = pipeline("""zero-shot-object-detection""")
lowercase_ = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=lowerCAmelCase_ , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
] , )
@require_torch
@slow
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = 2
lowercase_ = pipeline("""zero-shot-object-detection""")
lowercase_ = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=lowerCAmelCase_ , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
] , )
| 100
| 1