import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """
    Split `code` into its indented blocks, starting at `indent_level`. If provided, begins splitting after
    `start_prompt` and stops at `end_prompt` (but returns what's before `start_prompt` as a first block and what's
    after `end_prompt` as a last block, so joining the result always reproduces `code`).
    """
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
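

# Illustrative sketch (added for clarity, not part of the original utility): on a tiny
# snippet, blocks at the requested indent level come back as separate strings, and lines
# indented deeper stay inside their parent block:
#
#     >>> split_code_in_indented_blocks("foo = 1\nif x:\n    y = 2")
#     ['foo = 1', 'if x:\n    y = 2']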
def ignore_underscore(key):
    """Wrap a `key` function (mapping an object to a string) to lowercase and ignore underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a string."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
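

# Illustrative sketch (added, not in the original file): the isort-style ordering puts
# constants first, then classes, then functions, comparing case- and underscore-insensitively
# within each group:
#
#     >>> sort_objects(["bar", "CONSTANT_B", "Foo", "CONSTANT_A", "baz"])
#     ['CONSTANT_A', 'CONSTANT_B', 'Foo', 'bar', 'baz']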
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
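

# Illustrative sketch (added example): for a single-line `_import_structure` entry, only a
# bracketed list containing commas is reordered; the `["models"]` subscript has no comma
# and is left untouched:
#
#     >>> sort_objects_in_import('_import_structure["models"] = ["Foo", "CONST", "bar"]')
#     '_import_structure["models"] = ["CONST", "Foo", "bar"]'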
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; if `check_only=True`, just check whether `file` needs rewriting."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reordering the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` found under `PATH_TO_DIFFUSERS`."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """Rename the LM head weight key in a DialoGPT checkpoint and save it as a PyTorch dump."""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
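

# Note (added for clarity): `WEIGHTS_NAME` is the standard PyTorch checkpoint filename
# used by `transformers` ("pytorch_model.bin"), so after conversion the dump folder can
# be loaded directly, e.g. (illustrative call):
#
#     model = AutoModelForCausalLM.from_pretrained("./DialoGPT-small")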
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)
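

# Illustrative doctest-style check (added, not from the original module): a 90-degree
# counterclockwise rotation of a 2x2 matrix moves the top-right element to the top-left:
#
#     >>> rotate_90(make_matrix(2))
#     [[2, 4], [1, 3]]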
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """A bunch of sanity checks on the arguments before starting."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
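

# Note (added for clarity): freezing works by flipping `requires_grad` on the embedding
# weight tensors, so the optimizer simply never updates them. A quick sanity check after
# freezing could look like this (illustrative only):
#
#     n_frozen = sum(p.numel() for p in student.parameters() if not p.requires_grad)
#     logger.info(f"{n_frozen} parameters are frozen")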
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
                    " itUse `--force` if you want to overwrite it"
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
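

# Illustrative sketch (added, not in the original script): a 30 s clip at 16 kHz is
# cropped to `sample_rate * max_length` samples at a random offset:
#
#     >>> wav = np.zeros(16000 * 30)
#     >>> len(random_subsample(wav, max_length=20.0, sample_rate=16000))
#     320000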
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
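

# Note (added for clarity): with the lazy module in place, importing this package is
# cheap; the heavy submodules listed in `_import_structure` are only imported when an
# attribute such as `M2M100ForConditionalGeneration` is first accessed, at which point
# `_LazyModule` resolves it to the right submodule.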
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about a request."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename pointing into the cache."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
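

# Illustrative example (hypothetical path, added for clarity): a file resolved inside the
# Hub cache such as ".../snapshots/0123456789abcdef0123456789abcdef01234567/config.json"
# yields the 40-character hash "0123456789abcdef0123456789abcdef01234567"; a snapshot
# segment that does not look like a commit hash returns None instead.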
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowercase : Dict = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
lowercase : Any = 0
else:
with open(cache_version_file) as f:
try:
lowercase : List[Any] = int(f.read())
except ValueError:
lowercase : int = 0
if cache_version < 1:
lowercase : Union[str, Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
lowercase : int = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"""the directory exists and can be written to."""
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
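

# Illustrative example (added): the variant is spliced in before the file extension, so
# `_add_variant("diffusion_pytorch_model.bin", "fp16")` returns
# "diffusion_pytorch_model.fp16.bin", while `variant=None` leaves the name unchanged.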
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """
    Crack a Caesar cipher with the chi-squared test: try every shift, score each
    candidate plaintext against English letter frequencies, and return the
    (shift, chi-squared statistic, decoded message) with the best score.
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(A__ : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
__lowerCamelCase = min(
A__ , key=A__ , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
__lowerCamelCase
), (
__lowerCamelCase
),
) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
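With the names restored above, a quick usage sketch: ROT13 is a Caesar shift of 13, so for English text the chi-squared search should land on shift 13 (it is a heuristic, so very short inputs can misfire).

```python
ciphertext = "gur dhvpx oebja sbk whzcf bire gur ynml qbt"  # ROT13 of an English pangram
shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared(ciphertext)
print(shift, decoded)  # should print: 13 the quick brown fox jumps over the lazy dog
```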
| 12 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCamelCase__( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple=0.999 , UpperCamelCase__ : Any="cosine" , )->List[str]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCamelCase__ : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCamelCase__ : str ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
A__ = []
for i in range(UpperCamelCase__ ):
A__ = i / num_diffusion_timesteps
A__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCamelCase__ ) / alpha_bar_fn(UpperCamelCase__ ) , UpperCamelCase__ ) )
return torch.tensor(UpperCamelCase__ , dtype=torch.float32 )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = [e.name for e in KarrasDiffusionSchedulers]
__SCREAMING_SNAKE_CASE = 2
@register_to_config
def __init__( self,__lowerCamelCase = 1000,__lowerCamelCase = 0.00085,__lowerCamelCase = 0.012,__lowerCamelCase = "linear",__lowerCamelCase = None,__lowerCamelCase = "epsilon",__lowerCamelCase = False,__lowerCamelCase = False,__lowerCamelCase = 1.0,__lowerCamelCase = "linspace",__lowerCamelCase = 0,):
if trained_betas is not None:
A__ = torch.tensor(__lowerCamelCase,dtype=torch.float32 )
elif beta_schedule == "linear":
A__ = torch.linspace(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A__ = (
torch.linspace(beta_start**0.5,beta_end**0.5,__lowerCamelCase,dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A__ = betas_for_alpha_bar(__lowerCamelCase,alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
A__ = betas_for_alpha_bar(__lowerCamelCase,alpha_transform_type='''exp''' )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
A__ = 1.0 - self.betas
A__ = torch.cumprod(self.alphas,dim=0 )
# set all values
self.set_timesteps(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
A__ = use_karras_sigmas
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=None ):
if schedule_timesteps is None:
A__ = self.timesteps
A__ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A__ = 1 if len(__lowerCamelCase ) > 1 else 0
else:
A__ = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
A__ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,):
A__ = self.index_for_timestep(__lowerCamelCase )
A__ = self.sigmas[step_index]
A__ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None,__lowerCamelCase = None,):
A__ = num_inference_steps
A__ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A__ = np.linspace(0,num_train_timesteps - 1,__lowerCamelCase,dtype=__lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A__ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A__ = (np.arange(0,__lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A__ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A__ = (np.arange(__lowerCamelCase,0,-step_ratio )).round().copy().astype(__lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
A__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A__ = np.log(__lowerCamelCase )
A__ = np.interp(__lowerCamelCase,np.arange(0,len(__lowerCamelCase ) ),__lowerCamelCase )
if self.config.use_karras_sigmas:
A__ = self._convert_to_karras(in_sigmas=__lowerCamelCase,num_inference_steps=self.num_inference_steps )
A__ = np.array([self._sigma_to_t(__lowerCamelCase,__lowerCamelCase ) for sigma in sigmas] )
A__ = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
A__ = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase )
A__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A__ = torch.from_numpy(__lowerCamelCase )
A__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__lowerCamelCase ).startswith('''mps''' ):
# mps does not support float64
A__ = timesteps.to(__lowerCamelCase,dtype=torch.float32 )
else:
A__ = timesteps.to(device=__lowerCamelCase )
# empty dt and derivative
A__ = None
A__ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A__ = defaultdict(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
# get log sigma
A__ = np.log(__lowerCamelCase )
# get distribution
A__ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A__ = np.cumsum((dists >= 0),axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A__ = low_idx + 1
A__ = log_sigmas[low_idx]
A__ = log_sigmas[high_idx]
# interpolate sigmas
A__ = (low - log_sigma) / (low - high)
A__ = np.clip(__lowerCamelCase,0,1 )
# transform interpolation to time range
A__ = (1 - w) * low_idx + w * high_idx
A__ = t.reshape(sigma.shape )
return t
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
A__ = in_sigmas[-1].item()
A__ = in_sigmas[0].item()
A__ = 7.0 # 7.0 is the value used in the paper
A__ = np.linspace(0,1,__lowerCamelCase )
A__ = sigma_min ** (1 / rho)
A__ = sigma_max ** (1 / rho)
A__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def UpperCamelCase ( self ):
return self.dt is None
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase = True,):
A__ = self.index_for_timestep(__lowerCamelCase )
# advance index counter by 1
A__ = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A__ = self.sigmas[step_index]
A__ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A__ = self.sigmas[step_index - 1]
A__ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A__ = 0
A__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A__ = sigma_hat if self.state_in_first_order else sigma_next
A__ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A__ = sigma_hat if self.state_in_first_order else sigma_next
A__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A__ = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`" )
if self.config.clip_sample:
A__ = pred_original_sample.clamp(
-self.config.clip_sample_range,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A__ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A__ = sigma_next - sigma_hat
# store for 2nd order step
A__ = derivative
A__ = dt
A__ = sample
else:
# 2. 2nd order / Heun's method
A__ = (sample - pred_original_sample) / sigma_next
A__ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A__ = self.dt
A__ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A__ = None
A__ = None
A__ = None
A__ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A__ = self.sigmas.to(device=original_samples.device,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase ):
# mps does not support float64
A__ = self.timesteps.to(original_samples.device,dtype=torch.float32 )
A__ = timesteps.to(original_samples.device,dtype=torch.float32 )
else:
A__ = self.timesteps.to(original_samples.device )
A__ = timesteps.to(original_samples.device )
A__ = [self.index_for_timestep(__lowerCamelCase,__lowerCamelCase ) for t in timesteps]
A__ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A__ = sigma.unsqueeze(-1 )
A__ = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
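The `step` method above is Heun's method: the first-order pass stores an Euler derivative and `dt`, and the second pass averages that stored derivative with a fresh one before taking the combined step. A standalone numerical sketch of the same two-stage update on a toy ODE (illustrative only, not the scheduler's API):

```python
from typing import Callable

def heun_step(f: Callable[[float, float], float], sigma: float, sigma_next: float, y: float) -> float:
    """One Heun (improved Euler) step from sigma to sigma_next."""
    dt = sigma_next - sigma
    d1 = f(sigma, y)                 # first-order stage: derivative at the current point
    y_euler = y + d1 * dt            # provisional Euler step
    d2 = f(sigma_next, y_euler)      # derivative at the provisional point
    return y + 0.5 * (d1 + d2) * dt  # average the two derivatives, as in the scheduler

# Toy check on dy/dx = y (exact solution e^x); Heun is second-order accurate.
x, y = 0.0, 1.0
for _ in range(10):
    y = heun_step(lambda s, v: v, x, x + 0.1, y)
    x += 0.1
print(y)  # ~2.714, close to e ~= 2.71828
```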
| 193 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node :
    '''simple docstring'''
    data : int
    left : Node | None = None
    right : Node | None = None
def __A ( ) -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
return tree
def __A ( a_ :Node | None) -> list[int]:
return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def __A ( a_ :Node | None) -> list[int]:
return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def __A ( a_ :Node | None) -> list[int]:
return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def __A ( a_ :Node | None) -> int:
return (max(height(root.left) , height(root.right)) + 1) if root else 0
def __A ( a_ :Node | None) -> Sequence[Node | None]:
__a : list[Any] = []
if root is None:
return output
__a : Optional[Any] = deque([root])
while process_queue:
__a : Union[str, Any] = process_queue.popleft()
output.append(node.data)
if node.left:
process_queue.append(node.left)
if node.right:
process_queue.append(node.right)
return output
def __A ( a_ :Node | None , a_ :int) -> Sequence[Node | None]:
__a : list[Any] = []
def populate_output(a_ :Node | None , a_ :int) -> None:
if not root:
return
if level == 1:
output.append(root.data)
elif level > 1:
populate_output(root.left , level - 1)
populate_output(root.right , level - 1)
populate_output(a_ , a_)
return output
def __A ( a_ :Node | None , a_ :int) -> Sequence[Node | None]:
__a : list[Any] = []
def populate_output(a_ :Node | None , a_ :int) -> None:
if root is None:
return
if level == 1:
output.append(root.data)
elif level > 1:
populate_output(root.right , level - 1)
populate_output(root.left , level - 1)
populate_output(a_ , a_)
return output
def __A ( a_ :Node | None) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
__a : list[Sequence[Node | None]] = []
__a : Any = 0
__a : List[Any] = height(a_)
for h in range(1 , height_tree + 1):
if not flag:
output.append(get_nodes_from_left_to_right(a_ , a_))
__a : Union[str, Any] = 1
else:
output.append(get_nodes_from_right_to_left(a_ , a_))
__a : Optional[int] = 0
return output
def __A ( ) -> None: # Main function for testing.
__a : List[Any] = make_tree()
print(F"""In-order Traversal: {inorder(a_)}""")
print(F"""Pre-order Traversal: {preorder(a_)}""")
print(F"""Post-order Traversal: {postorder(a_)}""" , '''\n''')
print(F"""Height of Tree: {height(a_)}""" , '''\n''')
print('''Complete Level Order Traversal: ''')
print(level_order(a_) , '''\n''')
print('''Level-wise order Traversal: ''')
for level in range(1 , height(a_) + 1):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(a_ , level=a_))
print('''\nZigZag order Traversal: ''')
print(zigzag(a_))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 353 |
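For reference, here is what the traversals above compute on the five-node example tree, written with plain names (an illustrative rendering; the snippet's mangled identifiers map onto these one-to-one):

```python
from __future__ import annotations
from dataclasses import dataclass

@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None

def inorder(node: Node | None) -> list[int]:
    return [*inorder(node.left), node.data, *inorder(node.right)] if node else []

#        1
#       / \
#      2   3
#     / \
#    4   5
root = Node(1, Node(2, Node(4), Node(5)), Node(3))
print(inorder(root))  # [4, 2, 5, 1, 3] -- left subtree, then root, then right subtree
```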
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
)
__lowerCAmelCase = '''CIDAS/clipseg-rd64-refined'''
__lowerCAmelCase = '''image_segmenter'''
__lowerCAmelCase = CLIPSegForImageSegmentation
__lowerCAmelCase = ['''image''', '''text''']
__lowerCAmelCase = ['''image''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''vision'''] )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
return self.pre_processor(text=[label] , images=[image] , padding=_UpperCAmelCase , return_tensors='''pt''' )
def _lowerCamelCase ( self , _UpperCAmelCase ):
with torch.no_grad():
__a : List[str] = self.model(**_UpperCAmelCase ).logits
return logits
def _lowerCamelCase ( self , _UpperCAmelCase ):
        array = outputs.cpu().detach().numpy()
        # Binarize the logits into a {0, 1} mask before scaling to an 8-bit grayscale image
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8) )
| 188 | 0 |
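A sketch of how such a PipelineTool is typically driven end to end. The class's exported name is mangled in the snippet above, so `ImageSegmentationTool` and its import path are assumptions; running this also needs the transformers tools stack and the CLIPSeg checkpoint.

```python
from PIL import Image
from transformers.tools.image_segmentation import ImageSegmentationTool  # assumed name/path

tool = ImageSegmentationTool()  # loads CIDAS/clipseg-rd64-refined on first use
image = Image.open("cats.png")
mask = tool(image, "cat")       # __call__ chains encode -> forward -> decode
mask.save("cat_mask.png")       # binary mask rendered as 0/255 grayscale
```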
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims( tree ) -> List[Tuple[int, ...]]:
    shapes : List[Tuple[int, ...]] = []
    if isinstance(tree , dict ):
        for v in tree.values():
            shapes.extend(_fetch_dims(v ) )
    elif isinstance(tree , (list, tuple) ):
        for t in tree:
            shapes.extend(_fetch_dims(t ) )
    elif isinstance(tree , torch.Tensor ):
        shapes.append(tree.shape )
    else:
        raise ValueError('Not supported' )
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx( flat_idx : int , dims : Tuple[int, ...] ) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims ):
        idx.append(flat_idx % d )
        flat_idx = flat_idx // d
    return tuple(reversed(idx ) )
@torch.jit.ignore
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ) -> Tuple:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(SCREAMING_SNAKE_CASE_ ) -> None:
lowerCAmelCase__ : Tuple = True
for i in range(len(lowerCAmelCase_ ) ):
lowerCAmelCase__ : List[str] = -1 * (i + 1)
l[reversed_idx] &= tally
lowerCAmelCase__ : List[str] = l[reversed_idx]
if start_edges is None:
lowerCAmelCase__ : Union[str, Any] = [s == 0 for s in start]
reduce_edge_list(lowerCAmelCase_ )
if end_edges is None:
lowerCAmelCase__ : Any = [e == (d - 1) for e, d in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
reduce_edge_list(lowerCAmelCase_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCAmelCase_ ) == 0:
return [()]
elif len(lowerCAmelCase_ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
lowerCAmelCase__ : List[Tuple[slice, ...]] = []
lowerCAmelCase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
if s == e:
path_list.append(slice(lowerCAmelCase_ , s + 1 ) )
else:
break
lowerCAmelCase__ : Tuple[slice, ...] = tuple(lowerCAmelCase_ )
lowerCAmelCase__ : Any = len(lowerCAmelCase_ )
# start == end, and we're done
if divergence_idx == len(lowerCAmelCase_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowerCAmelCase__ : Tuple = start[divergence_idx]
return tuple(
path + (slice(lowerCAmelCase_ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowerCAmelCase__ : Dict = end[divergence_idx]
return tuple(
path + (slice(lowerCAmelCase_ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
lowerCAmelCase__ : Optional[Any] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
lowerCAmelCase__ : Union[str, Any] = t.shape[:no_batch_dims]
lowerCAmelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCAmelCase_ , lowerCAmelCase_ ) )
# _get_minimal_slice_set is inclusive
lowerCAmelCase__ : Optional[Any] = list(_flat_idx_to_idx(flat_end - 1 , lowerCAmelCase_ ) )
# Get an ordered list of slices to perform
lowerCAmelCase__ : Any = _get_minimal_slice_set(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
lowerCAmelCase__ : Optional[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , ) -> Optional[int]:
if not (len(lowerCAmelCase_ ) > 0):
raise ValueError('Must provide at least one input' )
lowerCAmelCase__ : Union[str, Any] = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCAmelCase_ )]
lowerCAmelCase__ : int = tuple([max(lowerCAmelCase_ ) for s in zip(*lowerCAmelCase_ )] )
def _prep_inputs(SCREAMING_SNAKE_CASE_ ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
lowerCAmelCase__ : Union[str, Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
lowerCAmelCase__ : Any = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
lowerCAmelCase__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
lowerCAmelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCAmelCase_ )
lowerCAmelCase__ : Optional[int] = None
if _out is not None:
lowerCAmelCase__ : Tuple = tensor_tree_map(lambda SCREAMING_SNAKE_CASE_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
lowerCAmelCase__ : Union[str, Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
lowerCAmelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(SCREAMING_SNAKE_CASE_ ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : Optional[int] = prepped_outputs
for _ in range(lowerCAmelCase_ ):
# Chunk the input
if not low_mem:
lowerCAmelCase__ : Optional[int] = _select_chunk
else:
lowerCAmelCase__ : Optional[Any] = partial(
_chunk_slice , flat_start=lowerCAmelCase_ , flat_end=min(lowerCAmelCase_ , i + chunk_size ) , no_batch_dims=len(lowerCAmelCase_ ) , )
lowerCAmelCase__ : Dict[str, Any] = tensor_tree_map(lowerCAmelCase_ , lowerCAmelCase_ )
# Run the layer on the chunk
lowerCAmelCase__ : Tuple = layer(**lowerCAmelCase_ )
# Allocate space for the output
if out is None:
lowerCAmelCase__ : str = tensor_tree_map(lambda SCREAMING_SNAKE_CASE_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCAmelCase_ )
# Put the chunk in its pre-allocated space
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
def assign(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
for k, v in da.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
assign(lowerCAmelCase_ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
lowerCAmelCase__ : List[str] = da[k]
assign(lowerCAmelCase_ , lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
for xa, xa in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
lowerCAmelCase__ : Tuple = xa
elif isinstance(lowerCAmelCase_ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
lowerCAmelCase__ : List[str] = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
lowerCAmelCase__ : Any = tensor_tree_map(lambda SCREAMING_SNAKE_CASE_ : t.view(orig_batch_dims + t.shape[1:] ) , lowerCAmelCase_ )
return out
class A__ :
def __init__( self : int , a : int = 512 , ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = max_chunk_size
lowerCAmelCase__ : Optional[int] = None
lowerCAmelCase__ : Optional[tuple] = None
def _lowerCamelCase ( self : Optional[Any] , a : Callable , a : tuple , a : int ):
'''simple docstring'''
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
lowerCAmelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
lowerCAmelCase__ : Optional[int] = [c for c in candidates if c > min_chunk_size]
lowerCAmelCase__ : Tuple = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(a : int ) -> bool:
try:
with torch.no_grad():
fn(*__a , chunk_size=__a )
return True
except RuntimeError:
return False
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : Tuple = len(__a ) - 1
while i > min_viable_chunk_size_index:
lowerCAmelCase__ : Dict = test_chunk_size(candidates[i] )
if not viable:
lowerCAmelCase__ : List[str] = (min_viable_chunk_size_index + i) // 2
else:
lowerCAmelCase__ : Any = i
lowerCAmelCase__ : str = (i + len(__a ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowerCamelCase ( self : Optional[Any] , a : Iterable , a : Iterable ):
'''simple docstring'''
lowerCAmelCase__ : Any = True
for aa, aa in zip(__a , __a ):
assert type(__a ) == type(__a )
if isinstance(__a , (list, tuple) ):
consistent &= self._compare_arg_caches(__a , __a )
elif isinstance(__a , __a ):
lowerCAmelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
lowerCAmelCase__ : Optional[int] = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
consistent &= self._compare_arg_caches(__a , __a )
else:
consistent &= aa == aa
return consistent
def _lowerCamelCase ( self : Union[str, Any] , a : Callable , a : tuple , a : int , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : tuple = tree_map(lambda a : a.shape if isinstance(__a , torch.Tensor ) else a , __a , __a )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__a )
lowerCAmelCase__ : str = self._compare_arg_caches(self.cached_arg_data , __a )
else:
# Otherwise, we can reuse the precomputed value
lowerCAmelCase__ : List[Any] = False
if not consistent:
lowerCAmelCase__ : int = self._determine_favorable_chunk_size(
__a , __a , __a , )
lowerCAmelCase__ : Dict = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 212 |
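The machinery above flattens all batch dimensions, runs the layer chunk by chunk, and writes results into a pre-allocated output so peak activation memory scales with the chunk size rather than the batch. A self-contained sketch of that core idea for a single tensor input (the real code also handles nested dicts/tuples and broadcastable batch dims):

```python
import torch

def chunked_apply(layer, x: torch.Tensor, chunk_size: int, no_batch_dims: int = 1) -> torch.Tensor:
    """Apply `layer` over the flattened batch dims of `x` in slices of `chunk_size`."""
    batch_shape = x.shape[:no_batch_dims]
    flat = x.reshape(-1, *x.shape[no_batch_dims:])  # flatten the batch dims
    out_chunks = []
    for i in range(0, flat.shape[0], chunk_size):
        out_chunks.append(layer(flat[i : i + chunk_size]))  # peak memory ~ one chunk
    out = torch.cat(out_chunks, dim=0)
    return out.reshape(*batch_shape, *out.shape[1:])

layer = torch.nn.Linear(8, 4)
x = torch.randn(16, 8)
print(chunked_apply(layer, x, chunk_size=4).shape)  # torch.Size([16, 4])
```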
from math import pi
def arc_length ( angle : int , radius : int ):
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
    print(arc_length(90, 10))
| 233 | 0 |
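As a sanity check of the formula: a 90-degree arc of a circle with radius 10 is a quarter of the full circumference, so the expected value is 2π·10/4 = 5π ≈ 15.708, which is what the `print` above emits.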
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node :
    '''simple docstring'''
    data : int
    left : Node | None = None
    right : Node | None = None
def UpperCamelCase__ ( ):
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
return tree
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : list[Any] = []
if root is None:
return output
__lowerCamelCase : Optional[int] = deque([root] )
while process_queue:
__lowerCamelCase : Optional[int] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : list[Any] = []
def populate_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return output
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : list[Any] = []
def populate_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return output
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
if root is None:
return []
__lowerCamelCase : list[Sequence[Node | None]] = []
__lowerCamelCase : Union[str, Any] = 0
__lowerCamelCase : Tuple = height(SCREAMING_SNAKE_CASE__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase : List[str] = 1
else:
output.append(get_nodes_from_right_to_left(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase : Tuple = 0
return output
def UpperCamelCase__ ( ): # Main function for testing.
__lowerCamelCase : List[Any] = make_tree()
print(f'In-order Traversal: {inorder(SCREAMING_SNAKE_CASE__ )}' )
print(f'Pre-order Traversal: {preorder(SCREAMING_SNAKE_CASE__ )}' )
print(f'Post-order Traversal: {postorder(SCREAMING_SNAKE_CASE__ )}' , '\n' )
print(f'Height of Tree: {height(SCREAMING_SNAKE_CASE__ )}' , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(SCREAMING_SNAKE_CASE__ ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(SCREAMING_SNAKE_CASE__ ) + 1 ):
print(f'Level {level}:' , get_nodes_from_left_to_right(SCREAMING_SNAKE_CASE__ , level=SCREAMING_SNAKE_CASE__ ) )
print('\nZigZag order Traversal: ' )
print(zigzag(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 194 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowercase_ = logging.get_logger(__name__)
@dataclass
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self: Any , **a: Optional[Any] ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__lowerCamelCase : str = deprecated_arg[3:]
setattr(self , a , not kwargs.pop(a ) )
logger.warning(
F'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
F' {positive_arg}={kwargs[positive_arg]}' )
__lowerCamelCase : str = kwargs.pop('torchscript' , self.torchscript )
__lowerCamelCase : int = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
__lowerCamelCase : Dict = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
super().__init__(**a )
__snake_case = field(default=__UpperCamelCase , metadata={"""help""": """Trace the models using torchscript"""} )
__snake_case = field(default=__UpperCamelCase , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
__snake_case = field(
default="""O1""" , metadata={
"""help""": (
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
"""See details at https://nvidia.github.io/apex/amp.html"""
)
} , )
@cached_property
def _snake_case ( self: Dict ):
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
__lowerCamelCase : Dict = torch.device('cpu' )
__lowerCamelCase : str = 0
elif is_torch_tpu_available():
__lowerCamelCase : Optional[int] = xm.xla_device()
__lowerCamelCase : Dict = 0
else:
__lowerCamelCase : Any = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__lowerCamelCase : Union[str, Any] = torch.cuda.device_count()
return device, n_gpu
@property
def _snake_case ( self: Optional[Any] ):
return is_torch_tpu_available() and self.tpu
@property
def _snake_case ( self: Union[str, Any] ):
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def _snake_case ( self: int ):
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def _snake_case ( self: Union[str, Any] ):
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def _snake_case ( self: List[Any] ):
return self.n_gpu > 0
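The `__init__` above keeps legacy `--no_*` flags working by popping each deprecated kwarg, stripping the `no_` prefix, and assigning the negated value to the positive attribute. A stripped-down sketch of that translation with illustrative names:

```python
class Args:
    deprecated_args = ["no_inference", "no_cuda"]

    def __init__(self, **kwargs):
        self.inference = True
        self.cuda = True
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # "no_cuda" -> "cuda"
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))

print(Args(no_cuda=True).cuda)  # False: the old negative flag became cuda=False
```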
| 194 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
_SCREAMING_SNAKE_CASE = {"""facebook/blenderbot-3B""": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCamelCase = bs[:]
UpperCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase_ )
cs.append(2**8 + n )
n += 1
UpperCamelCase = [chr(UpperCamelCase_ ) for n in cs]
return dict(zip(UpperCamelCase_ , UpperCamelCase_ ) )
def lowercase( UpperCamelCase_ ) -> str:
'''simple docstring'''
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
return pairs
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict="replace" , lowerCamelCase_ : Optional[int]="<s>" , lowerCamelCase_ : Tuple="</s>" , lowerCamelCase_ : List[str]="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : Tuple="<unk>" , lowerCamelCase_ : Any="<pad>" , lowerCamelCase_ : List[str]="<mask>" , lowerCamelCase_ : List[str]=False , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase = json.load(lowerCamelCase_ )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = errors # how to handle errors in decoding
UpperCamelCase = bytes_to_unicode()
UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle:
UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1]
UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
UpperCamelCase = {}
UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return len(self.encoder )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase = tuple(lowerCamelCase_ )
UpperCamelCase = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(lowerCamelCase_ ):
try:
UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(lowerCamelCase_ )
UpperCamelCase = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCamelCase = get_pairs(lowerCamelCase_ )
UpperCamelCase = """ """.join(lowerCamelCase_ )
UpperCamelCase = word
return word
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = []
for token in re.findall(self.pat , lowerCamelCase_ ):
UpperCamelCase = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int ):
"""simple docstring"""
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
return self.decoder.get(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = """""".join(lowerCamelCase_ )
UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCamelCase = 0
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
UpperCamelCase = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any]=False , **lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCamelCase = """ """ + text
return (text, kwargs)
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : str , lowerCamelCase_ : "Conversation" ):
"""simple docstring"""
UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to prefix the text with a space, as is done within Blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(lowerCamelCase_ )
UpperCamelCase = """ """.join(lowerCamelCase_ )
UpperCamelCase = self.encode(lowerCamelCase_ )
if len(lowerCamelCase_ ) > self.model_max_length:
UpperCamelCase = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
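The `bpe` method above loops: find the adjacent pair with the lowest merge rank, fuse every occurrence of it, and repeat until no ranked pair remains. A compact sketch of that loop with a toy three-rule merge table (toy data, not the Blenderbot vocabulary):

```python
from __future__ import annotations

def get_pairs(word: tuple[str, ...]) -> set[tuple[str, str]]:
    return {(a, b) for a, b in zip(word, word[1:])}

bpe_ranks = {("h", "e"): 0, ("l", "l"): 1, ("he", "ll"): 2}  # toy merge table
word = ("h", "e", "l", "l", "o")

while True:
    ranked = [p for p in get_pairs(word) if p in bpe_ranks]
    if not ranked:
        break
    first, second = min(ranked, key=lambda p: bpe_ranks[p])  # lowest rank merges first
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            merged.append(first + second)  # fuse the winning pair
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)

print(word)  # ('hell', 'o'): "h e" and "l l" merge first, then "he ll"
```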
| 343 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=3 , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[int]=10 , lowerCamelCase_ : List[str]=[10, 20, 30, 40] , lowerCamelCase_ : Tuple=[1, 1, 2, 1] , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : Tuple="relu" , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = num_channels
UpperCamelCase = embeddings_size
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_act
UpperCamelCase = num_labels
UpperCamelCase = scope
UpperCamelCase = len(lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = TFResNetModel(config=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = TFResNetForImageClassification(lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__lowerCAmelCase = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = TFResNetModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : str ):
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase = layer_type
UpperCamelCase = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = TFResNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowercase( ) -> Any:
'''simple docstring'''
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" )
# forward pass
UpperCamelCase = model(**lowerCamelCase_ )
# verify the logits
UpperCamelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
expected_slice = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1E-4 ) )
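The integration test above uses the standard pin-and-compare idiom: a short slice of logits recorded when the test was written is checked against fresh model output within a small tolerance. A minimal standalone illustration of the comparison itself (dummy values, no model download):

```python
import numpy as np

expected_slice = np.array([-11.1069, -9.7877, -8.3777])            # pinned reference values
fresh_logits = expected_slice + np.random.uniform(-1e-5, 1e-5, 3)  # stand-in for model output
assert np.allclose(fresh_logits, expected_slice, atol=1e-4)        # drift below 1e-4 passes
```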
| 343 | 1 |
from heapq import heappop, heappush
import numpy as np
def dijkstra( grid: np.ndarray , source: tuple[int, int] , destination: tuple[int, int] , allow_diagonal: bool , ) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x, y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
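With the names restored, a small usage example on a 3x3 binary grid where 1 marks walkable cells (an added sketch, not part of the original module):

```python
import numpy as np

grid = np.array([
    [1, 1, 1],
    [0, 0, 1],  # a wall forces the path around the right edge
    [1, 1, 1],
])
dist, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
print(dist)  # 6.0: straight down is blocked, so the route goes right, down, then left
print(path)  # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]
```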
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_pegasus_x'''] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
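`_LazyModule` defers the heavy torch imports until an attribute of this module is first accessed. The same effect can be sketched with a module-level `__getattr__` (PEP 562); this is an illustrative simplification, not the transformers implementation:

```python
import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}  # attr -> submodule map (toy)
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name: str):
    """Resolve `name` lazily on first access instead of importing everything upfront."""
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# Saved as lazy_mod.py: `import lazy_mod; lazy_mod.sqrt(2.0)` imports math only on that access.
```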
| 335 | 0 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = model.config
SCREAMING_SNAKE_CASE__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
SCREAMING_SNAKE_CASE__ = MBartConfig(
is_decoder=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , add_cross_attention=__UpperCamelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__UpperCamelCase , add_final_layer_norm=__UpperCamelCase , )
return encoder_config, decoder_config
def rename_key( name : List[str] ) -> str:
    """simple docstring"""
    if "encoder.model" in name:
        name = name.replace("""encoder.model""" , """encoder""" )
    if "decoder.model" in name:
        name = name.replace("""decoder.model""" , """decoder""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
    if name.startswith("""encoder""" ):
        if "layers" in name:
            name = """encoder.""" + name
        if "attn.proj" in name:
            name = name.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in name and "mask" not in name:
            name = name.replace("""attn""" , """attention.self""" )
        if "norm1" in name:
            name = name.replace("""norm1""" , """layernorm_before""" )
        if "norm2" in name:
            name = name.replace("""norm2""" , """layernorm_after""" )
        if "mlp.fc1" in name:
            name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
        if "mlp.fc2" in name:
            name = name.replace("""mlp.fc2""" , """output.dense""" )
        if name == "encoder.norm.weight":
            name = """encoder.layernorm.weight"""
        if name == "encoder.norm.bias":
            name = """encoder.layernorm.bias"""
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    decoder_input_ids = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, decoder_input_ids, None).logits
    logits = model(pixel_values, decoder_input_ids=decoder_input_ids).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
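
# A sketch of consuming a converted checkpoint; the repo id below is the
# published Donut DocVQA checkpoint, shown only as an illustrative target.
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")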
| 219 | # tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 219 | 1 |
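
# For context, a minimal sketch of the shared hook the conftest above delegates
# to; the real pytest_addoption_shared lives in diffusers.utils.testing_utils,
# and only the flag name is taken from the code above.
def pytest_addoption_shared(parser):
    parser.addoption(
        "--make-reports",
        action="store",
        default=False,
        help="generate report files with this prefix",
    )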
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
super().setUp()
snake_case__ : int = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
snake_case__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def _lowercase ( self : Optional[Any] ):
pass # TODO add if relevant
def _lowercase ( self : Optional[int] ):
pass # TODO add if relevant
def _lowercase ( self : Tuple ):
pass # TODO add if relevant
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = self.tokenizer_class(self.vocab_file )
snake_case__ : List[str] = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(__A , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def _lowercase ( self : int ):
snake_case__ : List[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(__A )
snake_case__ : str = "こんにちは、世界。\nこんばんは、世界。"
snake_case__ : Optional[int] = tokenizer.tokenize(__A )
self.assertListEqual(__A , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
snake_case__ : str = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(__A , "wb" ) as handle:
pickle.dump(__A , __A )
with open(__A , "rb" ) as handle:
snake_case__ : Tuple = pickle.load(__A )
snake_case__ : int = tokenizer_new.tokenize(__A )
self.assertListEqual(__A , __A )
def _lowercase ( self : List[Any] ):
snake_case__ : Optional[int] = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _lowercase ( self : Tuple ):
try:
snake_case__ : List[Any] = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _lowercase ( self : List[Any] ):
try:
snake_case__ : Union[str, Any] = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _lowercase ( self : int ):
snake_case__ : int = MecabTokenizer(do_lower_case=__A , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _lowercase ( self : Optional[int] ):
try:
snake_case__ : List[Any] = MecabTokenizer(
do_lower_case=__A , normalize_text=__A , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def _lowercase ( self : List[Any] ):
snake_case__ : Dict = MecabTokenizer(normalize_text=__A , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def _lowercase ( self : Union[str, Any] ):
snake_case__ : int = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(__A )
snake_case__ : str = "こんにちは、世界。\nこんばんは、世界。"
snake_case__ : Union[str, Any] = tokenizer.tokenize(__A )
self.assertListEqual(__A , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
snake_case__ : int = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(__A , "wb" ) as handle:
pickle.dump(__A , __A )
with open(__A , "rb" ) as handle:
snake_case__ : Optional[Any] = pickle.load(__A )
snake_case__ : Any = tokenizer_new.tokenize(__A )
self.assertListEqual(__A , __A )
@require_sudachi
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Any = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _lowercase ( self : Any ):
snake_case__ : List[Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def _lowercase ( self : List[Any] ):
snake_case__ : Any = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def _lowercase ( self : Dict ):
snake_case__ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def _lowercase ( self : Dict ):
snake_case__ : Optional[Any] = SudachiTokenizer(do_lower_case=__A , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _lowercase ( self : List[Any] ):
snake_case__ : Tuple = SudachiTokenizer(normalize_text=__A , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def _lowercase ( self : str ):
snake_case__ : Optional[int] = SudachiTokenizer(trim_whitespace=__A , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def _lowercase ( self : Optional[int] ):
snake_case__ : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(__A )
snake_case__ : List[str] = "こんにちは、世界。\nこんばんは、世界。"
snake_case__ : Optional[Any] = tokenizer.tokenize(__A )
self.assertListEqual(__A , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(__A , "wb" ) as handle:
pickle.dump(__A , __A )
with open(__A , "rb" ) as handle:
snake_case__ : Union[str, Any] = pickle.load(__A )
snake_case__ : Union[str, Any] = tokenizer_new.tokenize(__A )
self.assertListEqual(__A , __A )
@require_jumanpp
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Dict = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _lowercase ( self : Dict ):
snake_case__ : Dict = JumanppTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _lowercase ( self : Optional[int] ):
snake_case__ : int = JumanppTokenizer(normalize_text=__A )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _lowercase ( self : str ):
snake_case__ : List[str] = JumanppTokenizer(trim_whitespace=__A )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def _lowercase ( self : List[str] ):
snake_case__ : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def _lowercase ( self : Any ):
snake_case__ : Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
snake_case__ : List[Any] = {}
for i, token in enumerate(__A ):
snake_case__ : str = i
snake_case__ : List[Any] = WordpieceTokenizer(vocab=__A , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Optional[int] = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
snake_case__ : Tuple = tokenizer.subword_tokenizer
snake_case__ : Union[str, Any] = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(__A , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
snake_case__ : Optional[int] = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(__A , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : List[str] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
snake_case__ : Any = tokenizer.encode("ありがとう。" , add_special_tokens=__A )
snake_case__ : Optional[Any] = tokenizer.encode("どういたしまして。" , add_special_tokens=__A )
snake_case__ : List[str] = tokenizer.build_inputs_with_special_tokens(__A )
snake_case__ : Tuple = tokenizer.build_inputs_with_special_tokens(__A , __A )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
super().setUp()
snake_case__ : List[str] = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
def _lowercase ( self : Union[str, Any] ):
pass # TODO add if relevant
def _lowercase ( self : Optional[Any] ):
pass # TODO add if relevant
def _lowercase ( self : Union[str, Any] ):
pass # TODO add if relevant
def _lowercase ( self : int ):
snake_case__ : Dict = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
snake_case__ : Optional[int] = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
__A , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def _lowercase ( self : Optional[int] ):
snake_case__ : Any = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
snake_case__ : Optional[Any] = {}
for i, token in enumerate(__A ):
snake_case__ : str = i
snake_case__ : List[str] = CharacterTokenizer(vocab=__A , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def _lowercase ( self : List[Any] ):
snake_case__ : Tuple = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
snake_case__ : List[Any] = tokenizer.encode("ありがとう。" , add_special_tokens=__A )
snake_case__ : List[Any] = tokenizer.encode("どういたしまして。" , add_special_tokens=__A )
snake_case__ : Any = tokenizer.build_inputs_with_special_tokens(__A )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__A , __A )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
def _lowercase ( self : Optional[int] ):
snake_case__ : Any = "cl-tohoku/bert-base-japanese"
snake_case__ : Dict = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , __A )
class BertTokenizerMismatchTest(unittest.TestCase):
def _lowercase ( self : int ):
snake_case__ : Union[str, Any] = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertTokenizer.from_pretrained(__A )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
snake_case__ : Union[str, Any] = "bert-base-cased"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertJapaneseTokenizer.from_pretrained(__A )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
| 286 |
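
# Quick usage sketch for the tokenizer exercised above (checkpoint name taken
# from the tests; MeCab-based word tokenization needs the fugashi bindings).
from transformers import BertJapaneseTokenizer

tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tokenizer.tokenize("こんにちは、世界。"))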
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 286 | 1 |
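
# Companion usage sketch for PhoBERT; "vinai/phobert-base" is the published
# checkpoint and is assumed here rather than taken from the test file.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base")
print(tokenizer.tokenize("Tôi là sinh viên"))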
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 307 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 307 | 1 |
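
# Why the TYPE_CHECKING branch above matters: static type checkers evaluate it
# and see the real symbols, while at runtime the import never executes, keeping
# the module lazy. A minimal generic sketch of the same pattern:
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from collections import OrderedDict  # seen by type checkers only

def make_mapping() -> "OrderedDict[str, int]":  # forward reference, no import cost
    from collections import OrderedDict

    return OrderedDict(a=1)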
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        self.num_patches = (self.image_size // 32) ** 2
        self.seq_length = self.num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat") | 356 |
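
# End-to-end inference sketch mirroring the integration test above; the
# checkpoint id appears in the test file, and the local image path is a
# placeholder.
from PIL import Image
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])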
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
# Class name reconstructed from the CLIP defaults (OPENAI_CLIP_MEAN/STD) used below.
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 306 | 0 |
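
# Smoke-test sketch for the processor above, run on a random RGB array; the
# class name CLIPImageProcessor is the reconstruction used in this rewrite.
import numpy as np

image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
processor = CLIPImageProcessor()
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)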
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate, and `tgt_lang`, "
        "which should be the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 124 |
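
# Hedged usage sketch through the agents tool API; assumes a transformers
# version that ships `load_tool` and can download the NLLB-200 checkpoint.
from transformers import load_tool

translator = load_tool("translation")
print(translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))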
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 124 | 1 |
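
# Usage sketch for the RAG classes exported above, following the documented
# example; `use_dummy_dataset=True` keeps the retrieval index small.
from transformers import RagRetriever, RagSequenceForGeneration, RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
    "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)
inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
generated = model.generate(input_ids=inputs["input_ids"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))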
"""simple docstring"""
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[Any]=None, UpperCAmelCase__ : Optional[Any]=None ):
__lowercase = data
__lowercase = previous
__lowercase = next_node
def __str__( self : Tuple ):
return F"""{self.data}"""
def _lowercase ( self : str ):
return self.data
def _lowercase ( self : Any ):
return self.next
def _lowercase ( self : List[str] ):
return self.previous
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : str, UpperCAmelCase__ : Any ):
__lowercase = head
def __iter__( self : Union[str, Any] ):
return self
def _lowercase ( self : Tuple ):
if not self.current:
raise StopIteration
else:
__lowercase = self.current.get_data()
__lowercase = self.current.get_next()
return value
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : int ):
__lowercase = None # First node in list
__lowercase = None # Last node in list
def __str__( self : int ):
__lowercase = self.head
__lowercase = []
while current is not None:
nodes.append(current.get_data() )
__lowercase = current.get_next()
return " ".join(str(UpperCAmelCase__ ) for node in nodes )
def __contains__( self : Optional[int], UpperCAmelCase__ : int ):
__lowercase = self.head
while current:
if current.get_data() == value:
return True
__lowercase = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def _lowercase ( self : Tuple ):
if self.head:
return self.head.get_data()
return None
def _lowercase ( self : Union[str, Any] ):
if self.tail:
return self.tail.get_data()
return None
def _lowercase ( self : Optional[int], UpperCAmelCase__ : Node ):
if self.head is None:
__lowercase = node
__lowercase = node
else:
self.insert_before_node(self.head, UpperCAmelCase__ )
def _lowercase ( self : List[Any], UpperCAmelCase__ : Node ):
if self.head is None:
self.set_head(UpperCAmelCase__ )
else:
self.insert_after_node(self.tail, UpperCAmelCase__ )
def _lowercase ( self : Optional[int], UpperCAmelCase__ : int ):
__lowercase = Node(UpperCAmelCase__ )
if self.head is None:
self.set_head(UpperCAmelCase__ )
else:
self.set_tail(UpperCAmelCase__ )
def _lowercase ( self : List[Any], UpperCAmelCase__ : Node, UpperCAmelCase__ : Node ):
__lowercase = node
__lowercase = node.previous
if node.get_previous() is None:
__lowercase = node_to_insert
else:
__lowercase = node_to_insert
__lowercase = node_to_insert
def _lowercase ( self : Dict, UpperCAmelCase__ : Node, UpperCAmelCase__ : Node ):
__lowercase = node
__lowercase = node.next
if node.get_next() is None:
__lowercase = node_to_insert
else:
__lowercase = node_to_insert
__lowercase = node_to_insert
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : int ):
__lowercase = 1
__lowercase = Node(UpperCAmelCase__ )
__lowercase = self.head
while node:
if current_position == position:
self.insert_before_node(UpperCAmelCase__, UpperCAmelCase__ )
return
current_position += 1
__lowercase = node.next
self.insert_after_node(self.tail, UpperCAmelCase__ )
def _lowercase ( self : Tuple, UpperCAmelCase__ : int ):
__lowercase = self.head
while node:
if node.get_data() == item:
return node
__lowercase = node.get_next()
raise Exception("Node not found" )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[int] ):
if (node := self.get_node(UpperCAmelCase__ )) is not None:
if node == self.head:
__lowercase = self.head.get_next()
if node == self.tail:
__lowercase = self.tail.get_previous()
self.remove_node_pointers(UpperCAmelCase__ )
@staticmethod
def _lowercase ( UpperCAmelCase__ : Node ):
if node.get_next():
__lowercase = node.previous
if node.get_previous():
__lowercase = node.next
__lowercase = None
__lowercase = None
def _lowercase ( self : Optional[Any] ):
return self.head is None
def _A ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 144 |
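A quick aside on the listing above: every insertion is just a rewiring of the `previous`/`next` pointers of the neighbouring nodes. Below is a minimal, independently runnable sketch of that rewiring with readable names (all identifiers here are illustrative, not the obfuscated ones used in the row above):

```python
class Node:
    def __init__(self, data):
        self.data = data
        self.previous = None
        self.next = None


def insert_before(head, node, new_node):
    # Splice new_node in front of node and return the (possibly new) head.
    new_node.next = node
    new_node.previous = node.previous
    if node.previous is None:
        head = new_node                 # node was the head of the list
    else:
        node.previous.next = new_node
    node.previous = new_node
    return head


head = Node(1)
tail = Node(3)
head.next, tail.previous = tail, head
head = insert_before(head, tail, Node(2))
values, current = [], head
while current:
    values.append(current.data)
    current = current.next
assert values == [1, 2, 3]
```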
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _A ( ) -> None:
'''simple docstring'''
print("Making key files...")
make_key_files("rsa", 1024)
print("Key files generation successful.")
def _A ( UpperCamelCase_ : int) -> tuple[tuple[int, int], tuple[int, int]]:
'''simple docstring'''
print("Generating prime p...")
__lowercase = rabinMiller.generate_large_prime(UpperCamelCase_)
print("Generating prime q...")
__lowercase = rabinMiller.generate_large_prime(UpperCamelCase_)
__lowercase = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
while True:
__lowercase = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
if cryptoMath.gcd(UpperCamelCase_, (p - 1) * (q - 1)) == 1:
break
print("Calculating d that is mod inverse of e...")
__lowercase = cryptoMath.find_mod_inverse(UpperCamelCase_, (p - 1) * (q - 1))
__lowercase = (n, e)
__lowercase = (n, d)
return (public_key, private_key)
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : int) -> None:
'''simple docstring'''
if os.path.exists(F"""{name}_pubkey.txt""") or os.path.exists(F"""{name}_privkey.txt"""):
print("\nWARNING:")
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program.")
sys.exit()
__lowercase ,__lowercase = generate_key(UpperCamelCase_)
print(F"""\nWriting public key to file {name}_pubkey.txt...""")
with open(F"""{name}_pubkey.txt""", "w") as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""")
print(F"""Writing private key to file {name}_privkey.txt...""")
with open(F"""{name}_privkey.txt""", "w") as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""")
if __name__ == "__main__":
main()
| 144 | 1 |
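The generated pair above is consistent because `d` is chosen as the modular inverse of `e` modulo `(p - 1) * (q - 1)`. A self-contained sanity check of that invariant with deliberately tiny textbook primes (illustrative only; real keys use primes hundreds of digits long):

```python
p, q = 61, 53                # toy primes, never use sizes like this
n = p * q                    # 3233
phi = (p - 1) * (q - 1)      # 3120
e = 17                       # relatively prime to phi
d = pow(e, -1, phi)          # modular inverse, Python 3.8+

message = 42
cipher = pow(message, e, n)
assert pow(cipher, d, n) == message   # decryption inverts encryption
```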
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_: str =list(MODEL_FOR_MASKED_LM_MAPPING.keys())
SCREAMING_SNAKE_CASE_: Tuple =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A :
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCamelCase__ )} , )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a__ : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a__ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a__ : bool = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _lowercase (self : str ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class __A :
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a__ : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """The input training data file (a text file)."""} )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
a__ : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a__ : Optional[int] = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
a__ : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
a__ : Optional[int] = field(
default=UpperCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a__ : float = field(
default=0.1_5 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
a__ : bool = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def _lowercase (self : List[str] ):
if self.train_file is not None:
UpperCAmelCase_ = self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
UpperCAmelCase_ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
with open(snake_case_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = [json.loads(snake_case_ ) for line in f.read().splitlines() if (len(snake_case_ ) > 0 and not line.isspace())]
assert len(snake_case_ ) == len(snake_case_ )
UpperCAmelCase_ = {c: dataset[c] for c in dataset.column_names}
UpperCAmelCase_ = refs
return Dataset.from_dict(snake_case_ )
def lowerCAmelCase_ ( ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
UpperCAmelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , snake_case_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCAmelCase_ = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
UpperCAmelCase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""train[:{data_args.validation_split_percentage}%]""" , )
UpperCAmelCase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""train[{data_args.validation_split_percentage}%:]""" , )
else:
UpperCAmelCase_ = {}
if data_args.train_file is not None:
UpperCAmelCase_ = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase_ = data_args.validation_file
UpperCAmelCase_ = data_args.train_file.split("." )[-1]
if extension == "txt":
UpperCAmelCase_ = "text"
UpperCAmelCase_ = load_dataset(snake_case_ , data_files=snake_case_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCAmelCase_ = AutoConfig.from_pretrained(model_args.config_name , **snake_case_ )
elif model_args.model_name_or_path:
UpperCAmelCase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case_ )
else:
UpperCAmelCase_ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
UpperCAmelCase_ = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
UpperCAmelCase_ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **snake_case_ )
elif model_args.model_name_or_path:
UpperCAmelCase_ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **snake_case_ )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name." )
if model_args.model_name_or_path:
UpperCAmelCase_ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
UpperCAmelCase_ = AutoModelForMaskedLM.from_config(snake_case_ )
model.resize_token_embeddings(len(snake_case_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
UpperCAmelCase_ = datasets["train"].column_names
else:
UpperCAmelCase_ = datasets["validation"].column_names
UpperCAmelCase_ = "text" if "text" in column_names else column_names[0]
UpperCAmelCase_ = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(snake_case_ : int ):
# Remove empty lines
UpperCAmelCase_ = [line for line in examples["text"] if len(snake_case_ ) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=snake_case_ , truncation=snake_case_ , max_length=data_args.max_seq_length )
UpperCAmelCase_ = datasets.map(
snake_case_ , batched=snake_case_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
UpperCAmelCase_ = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
UpperCAmelCase_ = add_chinese_references(
tokenized_datasets["validation"] , data_args.validation_ref_file )
# If ref files are provided, we must keep the Trainer from removing those columns
UpperCAmelCase_ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
UpperCAmelCase_ = False
# Data collator
# This one will take care of randomly masking the tokens.
UpperCAmelCase_ = DataCollatorForWholeWordMask(tokenizer=snake_case_ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCAmelCase_ = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=snake_case_ , data_collator=snake_case_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
UpperCAmelCase_ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
UpperCAmelCase_ = model_args.model_name_or_path
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase_ = os.path.join(training_args.output_dir , "train_results.txt" )
if trainer.is_world_process_zero():
with open(snake_case_ , "w" ) as writer:
logger.info("***** Train results *****" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# Evaluation
UpperCAmelCase_ = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase_ = trainer.evaluate()
UpperCAmelCase_ = math.exp(eval_output["eval_loss"] )
UpperCAmelCase_ = perplexity
UpperCAmelCase_ = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" )
if trainer.is_world_process_zero():
with open(snake_case_ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in sorted(results.items() ):
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
return results
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
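One detail behind the collator chosen above: whole-word masking has to treat every WordPiece fragment of a word as a single unit. The sketch below shows only that grouping rule, using BERT's `##` continuation convention; it is a library-free illustration, not the collator's actual code:

```python
from typing import List


def group_whole_words(tokens: List[str]) -> List[List[int]]:
    # A "##"-prefixed piece continues the previous word; anything else
    # starts a new one. Masking is then applied to whole groups at once.
    groups: List[List[int]] = []
    for i, token in enumerate(tokens):
        if token.startswith("##") and groups:
            groups[-1].append(i)
        else:
            groups.append([i])
    return groups


assert group_whole_words(["un", "##answer", "##able", "question"]) == [[0, 1, 2], [3]]
```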
| 1 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
_UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=lowerCAmelCase__ ).to(lowerCAmelCase__ )
_UpperCAmelCase : str = AutoTokenizer.from_pretrained("google/mt5-small" )
_UpperCAmelCase : str = tokenizer("Hello there" , return_tensors="pt" ).input_ids
_UpperCAmelCase : str = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
_UpperCAmelCase : Any = model(input_ids.to(lowerCAmelCase__ ) , labels=labels.to(lowerCAmelCase__ ) ).loss
_UpperCAmelCase : Dict = -(labels.shape[-1] * loss.item())
_UpperCAmelCase : Any = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 ) | 145 | 0 |
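The score asserted in the test above is the total sequence log-probability: `loss.item()` is the mean negative log-likelihood per label token, so multiplying by the label length and negating recovers the sum. With illustrative numbers:

```python
import math

mean_nll = 2.5           # what loss.item() returns: average NLL per label token
num_label_tokens = 4     # labels.shape[-1]
sequence_logprob = -(num_label_tokens * mean_nll)
assert math.isclose(sequence_logprob, -10.0)
```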
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any ={
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
SCREAMING_SNAKE_CASE_: Tuple ={value: key for key, value in encode_dict.items()}
def lowerCAmelCase_ ( snake_case_ : Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def lowerCAmelCase_ ( snake_case_ : int ) -> str:
'''simple docstring'''
if set(lowercase_ ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
UpperCAmelCase_ = ""
for word in coded.split():
while len(lowercase_ ) != 0:
decoded += decode_dict[word[:5]]
UpperCAmelCase_ = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
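The lookup table above is essentially the traditional 24-letter Baconian scheme (where 'i'/'j' and 'u'/'v' historically shared codes; here 'j' and 'v' get filler codes instead). The modern 26-letter variant needs no table at all: a letter's code is its alphabet index written as five bits, with 0 as 'A' and 1 as 'B'. A sketch of that generated form, which deliberately differs from the table above:

```python
def bacon_code(letter: str) -> str:
    # Alphabet index as a 5-bit string, then 0 -> "A" and 1 -> "B".
    index = ord(letter.lower()) - ord("a")
    return format(index, "05b").translate(str.maketrans("01", "AB"))


assert bacon_code("a") == "AAAAA"
assert bacon_code("z") == "BBAAB"   # the table above maps z to "BABBB" instead
```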
| 362 | '''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __A ( unittest.TestCase , UpperCamelCase__ ):
def _lowercase (self : Tuple ):
UpperCAmelCase_ = load_tool("text-to-speech" )
self.tool.setup()
def _lowercase (self : Union[str, Any] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
UpperCAmelCase_ = self.tool("hey" )
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
def _lowercase (self : List[str] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
UpperCAmelCase_ = self.tool("hey" )
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
| 106 | 0 |
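Both tests above call `torch.manual_seed(0)` before invoking the tool because the model is stochastic; re-seeding is exactly what makes repeated runs comparable. The same idea in a standalone check:

```python
import torch

torch.manual_seed(0)
first = torch.rand(3)
torch.manual_seed(0)
second = torch.rand(3)
assert torch.equal(first, second)   # same seed, same samples
```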
'''simple docstring'''
import math
import sys
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = ""
try:
with open(_A , "rb" ) as binary_file:
UpperCAmelCase : Tuple = binary_file.read()
for dat in data:
UpperCAmelCase : Union[str, Any] = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = {"0": "0", "1": "1"}
UpperCAmelCase , UpperCAmelCase : Any = "", ""
UpperCAmelCase : Dict = len(_A )
for i in range(len(_A ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCAmelCase : Tuple = lexicon[curr_string]
result += last_match_id
UpperCAmelCase : Dict = last_match_id + "0"
if math.loga(_A ).is_integer():
UpperCAmelCase : List[Any] = {}
for curr_key in list(_A ):
UpperCAmelCase : str = lexicon.pop(_A )
UpperCAmelCase : Optional[Any] = new_lex
UpperCAmelCase : List[Any] = last_match_id + "1"
index += 1
UpperCAmelCase : Dict = ""
return result
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = 8
try:
with open(_A , "wb" ) as opened_file:
UpperCAmelCase : List[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(_A ) , _A )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_A , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCAmelCase : List[Any] = data_bits[counter:]
UpperCAmelCase : int = data_bits[counter + 1 :]
return data_bits
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = read_file_binary(_A )
UpperCAmelCase : Any = remove_prefix(_A )
UpperCAmelCase : int = decompress_data(_A )
write_file_binary(_A , _A )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 311 |
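The decompressor above has to track codes of growing bit width: once the entry count reaches a power of two, the encoder switched to codes one bit longer, so every stored code gains a leading "0". A standalone restatement of that rekeying step (names here are illustrative):

```python
import math
from typing import Dict


def widen_codes(lexicon: Dict[str, str], index: int) -> Dict[str, str]:
    # When index hits a power of two, all known codes need one more bit.
    if math.log2(index).is_integer():
        return {"0" + code: entry for code, entry in lexicon.items()}
    return lexicon


assert widen_codes({"0": "0", "1": "1"}, 2) == {"00": "0", "01": "1"}
assert widen_codes({"00": "0"}, 3) == {"00": "0"}   # 3 is not a power of two
```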
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class __magic_name__ :
'''simple docstring'''
def __init__( self, lowercase_ ) -> List[str]:
"""simple docstring"""
a__ =data
a__ =[0X67452301, 0Xefcdab89, 0X98badcfe, 0X10325476, 0Xc3d2e1f0]
@staticmethod
def _UpperCAmelCase ( lowercase_, lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0Xffffffff
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
a__ =b'''\x80''' + b'''\x00''' * (63 - (len(self.data ) + 8) % 64)
a__ =self.data + padding + struct.pack('''>Q''', 8 * len(self.data ) )
return padded_data
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0, len(self.padded_data ), 64 )
]
def _UpperCAmelCase ( self, lowercase_ ) -> List[Any]:
"""simple docstring"""
a__ =list(struct.unpack('''>16L''', lowercase_ ) ) + [0] * 64
for i in range(16, 80 ):
a__ =self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1 )
return w
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.padding()
a__ =self.split_blocks()
for block in self.blocks:
a__ =self.expand_block(lowercase_ )
a__, a__, a__, a__, a__ =self.h
for i in range(0, 80 ):
if 0 <= i < 20:
a__ =(b & c) | ((~b) & d)
a__ =0X5a827999
elif 20 <= i < 40:
a__ =b ^ c ^ d
a__ =0X6ed9eba1
elif 40 <= i < 60:
a__ =(b & c) | (b & d) | (c & d)
a__ =0X8f1bbcdc
elif 60 <= i < 80:
a__ =b ^ c ^ d
a__ =0Xca62c1d6
a__, a__, a__, a__, a__ =(
self.rotate(lowercase_, 5 ) + f + e + k + expanded_block[i] & 0Xffffffff,
a,
self.rotate(lowercase_, 30 ),
c,
d,
)
a__ =(
self.h[0] + a & 0Xffffffff,
self.h[1] + b & 0Xffffffff,
self.h[2] + c & 0Xffffffff,
self.h[3] + d & 0Xffffffff,
self.h[4] + e & 0Xffffffff,
)
return ("{:08x}" * 5).format(*self.h )
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ =b'''Test String'''
assert SHAaHash(_A ).final_hash() == hashlib.shaa(_A ).hexdigest() # noqa: S324
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ =argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
a__ =parser.parse_args()
a__ =args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
a__ =f.read()
else:
a__ =bytes(_A , '''utf-8''' )
print(SHAaHash(_A ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 188 | 0 |
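Two invariants the class above depends on, restated standalone: the 32-bit rotation wraps shifted-out bits back in on the right, and the padding always rounds a message up to whole 64-byte blocks (one 0x80 marker byte, zero fill, then an 8-byte length field):

```python
def rotl32(n: int, b: int) -> int:
    # 32-bit left rotation: bits pushed out on the left re-enter on the right.
    return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF


def padded_length(message_length: int) -> int:
    # 0x80 marker + zero fill + 8-byte big-endian bit length, as above.
    return message_length + 1 + (63 - (message_length + 8) % 64) + 8


assert rotl32(0x80000000, 1) == 0x00000001
assert all(padded_length(n) % 64 == 0 for n in range(200))
```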
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 359 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
lowerCamelCase_ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
lowerCamelCase_ = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def __lowercase ( ) -> Dict:
'''simple docstring'''
_A = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
_A = bs[:]
_A = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowercase )
cs.append(2**8 + n )
n += 1
_A = [chr(__lowercase ) for n in cs]
return dict(zip(__lowercase , __lowercase ) )
def __lowercase ( __lowercase ) -> Dict:
'''simple docstring'''
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A = char
return pairs
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple="replace" , __UpperCAmelCase : Optional[int]="<s>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : List[Any]="</s>" , __UpperCAmelCase : Any="<s>" , __UpperCAmelCase : int="<unk>" , __UpperCAmelCase : Optional[int]="<pad>" , __UpperCAmelCase : Optional[Any]="<mask>" , __UpperCAmelCase : Any=False , **__UpperCAmelCase : Any , ):
'''simple docstring'''
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
with open(__UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
_A = json.load(__UpperCAmelCase )
_A = {v: k for k, v in self.encoder.items()}
_A = errors # how to handle errors in decoding
_A = bytes_to_unicode()
_A = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCAmelCase , encoding="utf-8" ) as merges_handle:
_A = merges_handle.read().split("\n" )[1:-1]
_A = [tuple(merge.split() ) for merge in bpe_merges]
_A = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_A = {}
_A = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_A = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_A = tuple(__UpperCAmelCase )
_A = get_pairs(__UpperCAmelCase )
if not pairs:
return token
while True:
_A = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(__UpperCAmelCase ):
try:
_A = word.index(__UpperCAmelCase , __UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A = j
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(__UpperCAmelCase )
_A = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
_A = get_pairs(__UpperCAmelCase )
_A = " ".join(__UpperCAmelCase )
_A = word
return word
def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ):
'''simple docstring'''
_A = []
for token in re.findall(self.pat , __UpperCAmelCase ):
_A = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCAmelCase ).split(" " ) )
return bpe_tokens
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Tuple ):
'''simple docstring'''
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
return self.decoder.get(__UpperCAmelCase )
def lowerCAmelCase ( self : str , __UpperCAmelCase : Dict ):
'''simple docstring'''
_A = "".join(__UpperCAmelCase )
_A = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + "\n" )
_A = 0
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
_A = token_index
writer.write(" ".join(__UpperCAmelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A = [self.cls_token_id]
_A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict=False , **__UpperCAmelCase : Any ):
'''simple docstring'''
_A = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCAmelCase ) > 0 and not text[0].isspace()):
_A = " " + text
return (text, kwargs)
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , ):
'''simple docstring'''
_A = super()._pad(
encoded_inputs=__UpperCAmelCase , max_length=__UpperCAmelCase , padding_strategy=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
_A = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_A = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_A = len(encoded_inputs["global_attention_mask"] ) != len(__UpperCAmelCase )
if needs_to_be_padded:
_A = len(__UpperCAmelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_A = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_A = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 174 | 0 |
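The byte-level BPE above works because `bytes_to_unicode` is a bijection: every one of the 256 byte values maps to a distinct printable character, so arbitrary byte sequences round-trip through the vocabulary. A standalone restatement of the table built above, with that property checked:

```python
from typing import Dict


def bytes_to_unicode() -> Dict[int, str]:
    # Printable bytes map to themselves; the remaining 68 bytes are
    # shifted past 0xFF so each still gets a visible character.
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, (chr(c) for c in cs)))


table = bytes_to_unicode()
assert len(table) == 256
assert len(set(table.values())) == 256   # bijective: decoding is unambiguous
```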
from __future__ import annotations
lowerCAmelCase : str = [True] * 1_00_00_01
lowerCAmelCase : Union[str, Any] = 2
while i * i <= 1_00_00_00:
if seive[i]:
for j in range(i * i, 1_00_00_01, i):
lowerCAmelCase : int = False
i += 1
def A_ ( a ):
"""simple docstring"""
return seive[n]
def A_ ( a ):
"""simple docstring"""
return any(digit in '02468' for digit in str(a ) )
def A_ ( a = 1_0_0_0_0_0_0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(a ) and not contains_an_even_digit(a ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = str(a )
SCREAMING_SNAKE_CASE_ : Dict = [int(str_num[j:] + str_num[:j] ) for j in range(len(a ) )]
if all(is_prime(a ) for i in list_nums ):
result.append(a )
return result
def A_ ( ):
"""simple docstring"""
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'{len(find_circular_primes()) = }')
| 253 |
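Two observations drive the search above: a circular prime must stay prime under every digit rotation, and (beyond the single digit 2) any even digit means some rotation ends in an even digit, so such candidates are skipped outright. The rotation step in isolation:

```python
from typing import List


def rotations(n: int) -> List[int]:
    # All cyclic rotations of n's decimal digits.
    s = str(n)
    return [int(s[i:] + s[:i]) for i in range(len(s))]


assert rotations(197) == [197, 971, 719]        # all prime: 197 is circular
assert any(r % 2 == 0 for r in rotations(23))   # 32 is even: 23 is not
```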
import math
def A_ ( a , a = 0 , a = 0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = end or len(a )
for i in range(a , a ):
SCREAMING_SNAKE_CASE_ : List[Any] = i
SCREAMING_SNAKE_CASE_ : Optional[Any] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
SCREAMING_SNAKE_CASE_ : Tuple = array[temp_index - 1]
temp_index -= 1
SCREAMING_SNAKE_CASE_ : str = temp_index_value
return array
def A_ ( a , a , a ): # Max Heap
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = index
SCREAMING_SNAKE_CASE_ : str = 2 * index + 1 # Left Node
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
SCREAMING_SNAKE_CASE_ : Dict = left_index
if right_index < heap_size and array[largest] < array[right_index]:
SCREAMING_SNAKE_CASE_ : Any = right_index
if largest != index:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = array[largest], array[index]
heapify(a , a , a )
def A_ ( a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = len(a )
for i in range(n // 2 , -1 , -1 ):
heapify(a , a , a )
for i in range(n - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = array[0], array[i]
heapify(a , 0 , a )
return array
def A_ ( a , a , a , a ):
"""simple docstring"""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def A_ ( a , a , a , a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = low
SCREAMING_SNAKE_CASE_ : Tuple = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = array[j], array[i]
i += 1
def A_ ( a ):
"""simple docstring"""
if len(a ) == 0:
return array
SCREAMING_SNAKE_CASE_ : Any = 2 * math.ceil(math.loga(len(a ) ) )
SCREAMING_SNAKE_CASE_ : int = 1_6
return intro_sort(a , 0 , len(a ) , a , a )
def A_ ( a , a , a , a , a ):
"""simple docstring"""
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(a )
max_depth -= 1
SCREAMING_SNAKE_CASE_ : Optional[int] = median_of_a(a , a , start + ((end - start) // 2) + 1 , end - 1 )
SCREAMING_SNAKE_CASE_ : Dict = partition(a , a , a , a )
intro_sort(a , a , a , a , a )
SCREAMING_SNAKE_CASE_ : List[Any] = p
return insertion_sort(a , a , a )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : List[str] = input('Enter numbers separated by a comma : ').strip()
lowerCAmelCase : Optional[Any] = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
| 253 | 1 |
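The pivot selection in the introsort above exploits a small trick: an element is the median of three exactly when its comparisons against the other two disagree. Restated with plain names:

```python
def median_of_three(x: int, y: int, z: int) -> int:
    # x is the median iff (x > y) and (x > z) differ; likewise for y.
    if (x > y) != (x > z):
        return x
    if (y > x) != (y > z):
        return y
    return z


assert median_of_three(3, 1, 2) == 2
assert median_of_three(1, 3, 2) == 2
assert median_of_three(7, 7, 7) == 7   # ties fall through to the last value
```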
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
@slow
@require_torch
def __A ( self ) -> int:
A_ = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
A_ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
A_ = bertabert.config.encoder.vocab_size
A_ = tokenizer.sep_token_id
A_ = tokenizer.cls_token_id
A_ = 128
A_ = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
A_ = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
A_ = train_dataset.select(range(32 ) )
A_ = val_dataset.select(range(16 ) )
A_ = 4
def _map_to_encoder_decoder_inputs(_SCREAMING_SNAKE_CASE ):
# Tokenizer will automatically set [BOS] <text> [EOS]
A_ = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_SCREAMING_SNAKE_CASE , max_length=512 )
A_ = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_SCREAMING_SNAKE_CASE , max_length=128 )
A_ = inputs.input_ids
A_ = inputs.attention_mask
A_ = outputs.input_ids
A_ = outputs.input_ids.copy()
A_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
A_ = outputs.attention_mask
assert all(len(_SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids )
assert all(len(_SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_SCREAMING_SNAKE_CASE ):
A_ = pred.label_ids
A_ = pred.predictions
# all unnecessary tokens are removed
A_ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
A_ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
A_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_SCREAMING_SNAKE_CASE ) )] ) / len(_SCREAMING_SNAKE_CASE )
return {"accuracy": accuracy}
# map train dataset
A_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
A_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
A_ = self.get_auto_remove_tmp_dir()
A_ = SeqaSeqTrainingArguments(
output_dir=_SCREAMING_SNAKE_CASE , per_device_train_batch_size=_SCREAMING_SNAKE_CASE , per_device_eval_batch_size=_SCREAMING_SNAKE_CASE , predict_with_generate=_SCREAMING_SNAKE_CASE , evaluation_strategy='''steps''' , do_train=_SCREAMING_SNAKE_CASE , do_eval=_SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
A_ = SeqaSeqTrainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
# start training
trainer.train()
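The label preprocessing above replaces padding ids with -100 because that is the default `ignore_index` of PyTorch's cross-entropy loss, so padded positions contribute nothing to training. In isolation:

```python
pad_token_id = 0
labels = [[5, 9, 0, 0], [7, 0, 0, 0]]
masked = [
    [tok if tok != pad_token_id else -100 for tok in seq] for seq in labels
]
assert masked == [[5, 9, -100, -100], [7, -100, -100, -100]]
```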
| 18 | '''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=0.6 , _SCREAMING_SNAKE_CASE=None , ) -> Tuple:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
A_ = mask_ratio
A_ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __A ( self ) -> Union[str, Any]:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Dict:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
A_ = ViTMAEModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
A_ = (self.image_size // self.patch_size) ** 2
A_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A_ = 1
A_ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(_SCREAMING_SNAKE_CASE )
A_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __A ( self ) -> int:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : int = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__lowercase : List[Any] = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
__lowercase : Union[str, Any] = False
__lowercase : List[Any] = False
__lowercase : List[str] = False
__lowercase : List[str] = False
def __A ( self ) -> Any:
A_ = ViTMAEModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __A ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __A ( self ) -> int:
pass
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Union[str, Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
# make masks reproducible
np.random.seed(2 )
A_ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
A_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A_ = pt_noise
super().check_pt_tf_models(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
A_ = outputs[0].cpu().numpy()
A_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ = model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Make sure we don't have nans
A_ = after_outputs[0].cpu().numpy()
A_ = 0
A_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __A ( self ) -> List[str]:
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __A ( self ) -> Dict:
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __A ( self ) -> Tuple:
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __A ( self ) -> str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> Union[str, Any]:
pass
@slow
def __A ( self ) -> Dict:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = ViTMAEModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ) -> Dict:
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __A ( self ) -> List[str]:
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __A ( self ) -> List[str]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
A_ = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(_SCREAMING_SNAKE_CASE )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A_ = ViTMAEConfig()
A_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A_ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
A_ = model(**_SCREAMING_SNAKE_CASE , noise=torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ) )
# verify the logits
A_ = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
A_ = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_SCREAMING_SNAKE_CASE ) , atol=1E-4 ) )
| 18 | 1 |
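The sequence-length arithmetic the tester above relies on, evaluated with its own defaults (30-pixel images, 2-pixel patches, mask ratio 0.6): the encoder only sees the unmasked fraction of patch tokens, plus the [CLS] token, rounded up.

```python
import math

num_patches = (30 // 2) ** 2                                # 225
seq_length = int(math.ceil((1 - 0.6) * (num_patches + 1)))  # [CLS] included
assert (num_patches, seq_length) == (225, 91)
```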
def lowercase_ ( _lowerCamelCase : int = 200_0000):
lowercase__ : List[Any] = [0 for i in range(n + 1)]
lowercase__ : Dict = 1
lowercase__ : List[Any] = 1
for i in range(2 , int(n**0.5) + 1):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , _lowerCamelCase):
lowercase__ : Union[str, Any] = 1
lowercase__ : List[Any] = 0
for i in range(_lowerCamelCase):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"{solution() = }")
| 87 | import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features_order(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
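# A minimal round-trip sketch using the same `datasets.io.json` helpers the
# tests above exercise (added for illustration; the in-memory dataset and the
# file name are illustrative, not part of the test suite):
if __name__ == "__main__":
    ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]})
    JsonDatasetWriter(ds, "roundtrip.jsonl", lines=True).write()
    reloaded = JsonDatasetReader("roundtrip.jsonl").read()
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]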
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
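# A small usage sketch (added for illustration): build the default config and
# override one field; the defaults mirror the `__init__` signature above.
if __name__ == "__main__":
    config = RoCBertConfig(num_hidden_layers=6)
    print(config.model_type)  # "roc_bert"
    print(config.num_hidden_layers)  # 6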
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )
        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
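# Hedged usage sketch mirroring the integration test above (the checkpoint id
# comes from the test; a CUDA device and a sizeable model download are assumed):
#
#     pipe = UnCLIPImageVariationPipeline.from_pretrained(
#         "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
#     ).to("cuda")
#     out = pipe(input_image, generator=torch.Generator("cpu").manual_seed(0), output_type="np")
#     variation = out.images[0]  # (256, 256, 3) array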
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, "\t")
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, " ")
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, "\n")
                    "\r\n",  # (carriage return and line feed, "\r\n")
                    "\u000D",  # (carriage return, "\r")
                    "\r",  # (carriage return, "\r")
                    "\u000D",  # (carriage return, "\r")
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Check that an error is raised when the user tries to load a previous (pre-v4.17.0) version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
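# Hedged usage sketch (added; uses the public OpenAI CLIP checkpoint rather
# than the temporary vocab/merges files written in `setUp`; network access assumed):
if __name__ == "__main__":
    tok = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    print(tok.tokenize("lower newer"))  # e.g. ['lower</w>', 'newer</w>']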
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # `l[reversed_idx]` is True only if every entry below it is also at the edge
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
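# A minimal sketch of `chunk_layer` (added for illustration; run from a context
# where this module is importable). The layer sees at most `chunk_size` rows at
# a time, but the stitched output matches a single full-batch call:
#
#     def double(x: torch.Tensor) -> torch.Tensor:
#         return x * 2
#
#     xs = torch.randn(10, 4)
#     out = chunk_layer(double, {"x": xs}, chunk_size=3, no_batch_dims=1)
#     assert torch.allclose(out, xs * 2)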
class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes grow quickly with `max_chunk_size`
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
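# Hedged sketch of `ChunkSizeTuner` (added): `fn` is any callable accepting a
# `chunk_size` keyword argument; the tuner binary-searches the largest
# power-of-two chunk size (up to `max_chunk_size`) that runs without RuntimeError:
#
#     tuner = ChunkSizeTuner(max_chunk_size=512)
#     chunk_size = tuner.tune_chunk_size(fn, (inputs,), 16)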
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run"""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
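# Example invocation (the script file name and the run id are illustrative):
#
#     python get_github_job_time.py --workflow_run_id 1234567890
#
# Pass a GitHub token to `get_job_time` when the workflow data requires authentication.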
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
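# Typical launches (the flags are defined above; `accelerate launch` assumes a
# prior `accelerate config`; file name illustrative):
#
#     python cross_validation.py --num_folds 3
#     accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16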
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_12,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def SCREAMING_SNAKE_CASE__ ( __a ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 327 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Any , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(UpperCAmelCase_ )
def UpperCamelCase ( self: str , **UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
# preprocess args
if "points_per_batch" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self: Optional[Any] , UpperCAmelCase_: Tuple , *UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=None , UpperCAmelCase_: Tuple=None , **UpperCAmelCase_: Any ):
'''simple docstring'''
return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict=64 , UpperCAmelCase_: int = 0 , UpperCAmelCase_: float = 512 / 1_500 , UpperCAmelCase_: Optional[int] = 32 , UpperCAmelCase_: Optional[int] = 1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor.size["""longest_edge"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.generate_crop_boxes(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
_SCREAMING_SNAKE_CASE = self.get_inference_context()
with inference_context():
_SCREAMING_SNAKE_CASE = self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device )
_SCREAMING_SNAKE_CASE = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
_SCREAMING_SNAKE_CASE = image_embeddings
_SCREAMING_SNAKE_CASE = grid_points.shape[1]
_SCREAMING_SNAKE_CASE = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = grid_points[:, i : i + points_per_batch, :, :]
_SCREAMING_SNAKE_CASE = input_labels[:, i : i + points_per_batch]
_SCREAMING_SNAKE_CASE = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=0.88 , UpperCAmelCase_: Dict=0.95 , UpperCAmelCase_: Tuple=0 , UpperCAmelCase_: str=1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = model_inputs.pop("""input_boxes""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""is_last""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""original_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = self.model(**UpperCAmelCase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_SCREAMING_SNAKE_CASE = model_outputs["""pred_masks"""]
_SCREAMING_SNAKE_CASE = self.image_processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model_outputs["""iou_scores"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=False , UpperCAmelCase_: Any=0.7 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
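# Accumulate the scores, masks and boxes produced by every chunked forward pass before filtering them with non-maximum suppression.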
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.post_process_for_mask_generation(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = defaultdict(UpperCAmelCase_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {}
if output_rle_mask:
_SCREAMING_SNAKE_CASE = rle_mask
if output_bboxes_mask:
_SCREAMING_SNAKE_CASE = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 306 | 0 |
from __future__ import annotations
class __magic_name__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : int ) -> None:
'''simple docstring'''
UpperCamelCase__ : int = order
# a_{0} ... a_{k}
UpperCamelCase__ : int = [1.0] + [0.0] * order
# b_{0} ... b_{k}
UpperCamelCase__ : Tuple = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
UpperCamelCase__ : Tuple = [0.0] * self.order
# y[n-1] ... y[n-k]
UpperCamelCase__ : List[str] = [0.0] * self.order
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : list[float] , lowerCamelCase__ : list[float] ) -> None:
'''simple docstring'''
if len(lowerCamelCase__ ) < self.order:
UpperCamelCase__ : int = [1.0, *a_coeffs]
if len(lowerCamelCase__ ) != self.order + 1:
UpperCamelCase__ : Dict = (
F"Expected a_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(lowerCamelCase__ )}"
)
raise ValueError(lowerCamelCase__ )
if len(lowerCamelCase__ ) != self.order + 1:
UpperCamelCase__ : Dict = (
F"Expected b_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(lowerCamelCase__ )}"
)
raise ValueError(lowerCamelCase__ )
UpperCamelCase__ : int = a_coeffs
UpperCamelCase__ : Any = b_coeffs
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : float ) -> float:
'''simple docstring'''
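# Direct-form IIR difference equation: y[n] = (b0*x[n] + sum_{i=1..k} (b_i*x[n-i] - a_i*y[n-i])) / a0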
UpperCamelCase__ : List[str] = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
UpperCamelCase__ : Union[str, Any] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
UpperCamelCase__ : List[Any] = self.input_history[:-1]
UpperCamelCase__ : Optional[int] = self.output_history[:-1]
UpperCamelCase__ : List[Any] = sample
UpperCamelCase__ : Any = result
return result
| 365 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__UpperCamelCase : Optional[int] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
__UpperCamelCase : Optional[int] = parser.parse_args()
__UpperCamelCase : Union[str, Any] = "cpu"
__UpperCamelCase : Dict = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
__UpperCamelCase : int = "path-to-your-trained-model"
__UpperCamelCase : List[str] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__UpperCamelCase : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__UpperCamelCase : Optional[Any] = pipe.to(device)
# to channels last
__UpperCamelCase : Tuple = pipe.unet.to(memory_format=torch.channels_last)
__UpperCamelCase : Optional[int] = pipe.vae.to(memory_format=torch.channels_last)
__UpperCamelCase : int = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__UpperCamelCase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__UpperCamelCase : Tuple = torch.randn(2, 4, 64, 64)
__UpperCamelCase : Any = torch.rand(1) * 999
__UpperCamelCase : Any = torch.randn(2, 77, 768)
__UpperCamelCase : List[Any] = (sample, timestep, encoder_hidden_status)
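# Passing a sample input lets IPEX specialize its optimizations to the given tensor shapes; the except branch falls back to optimizing without a sample input if that fails.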
try:
__UpperCamelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__UpperCamelCase : str = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__UpperCamelCase : Tuple = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__UpperCamelCase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__UpperCamelCase : List[Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__UpperCamelCase : Optional[Any] = 666
__UpperCamelCase : int = torch.Generator(device).manual_seed(seed)
__UpperCamelCase : int = {"generator": generator}
if args.steps is not None:
__UpperCamelCase : str = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__UpperCamelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 51 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :jnp.ndarray
_UpperCAmelCase :jnp.ndarray
class lowercase__ ( nn.Module ):
_UpperCAmelCase :int
_UpperCAmelCase :Tuple[int] = (16, 32, 96, 256)
_UpperCAmelCase :jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : Optional[int] =nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase_ : Any =[]
for i in range(len(self.block_out_channels ) - 1 ):
lowerCamelCase_ : Union[str, Any] =self.block_out_channels[i]
lowerCamelCase_ : Any =self.block_out_channels[i + 1]
lowerCamelCase_ : List[str] =nn.Conv(
snake_case__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case__ )
lowerCamelCase_ : List[str] =nn.Conv(
snake_case__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case__ )
lowerCamelCase_ : Union[str, Any] =blocks
lowerCamelCase_ : Optional[Any] =nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Tuple , snake_case__ : Union[str, Any] ):
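# Embed the conditioning image: conv_in -> SiLU, the stack of conv blocks each followed by SiLU, then the zero-initialized conv_out.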
lowerCamelCase_ : int =self.conv_in(snake_case__ )
lowerCamelCase_ : List[Any] =nn.silu(snake_case__ )
for block in self.blocks:
lowerCamelCase_ : Union[str, Any] =block(snake_case__ )
lowerCamelCase_ : List[str] =nn.silu(snake_case__ )
lowerCamelCase_ : Tuple =self.conv_out(snake_case__ )
return embedding
@flax_register_to_config
class lowercase__ ( nn.Module, snake_case__, snake_case__ ):
_UpperCAmelCase :int = 32
_UpperCAmelCase :int = 4
_UpperCAmelCase :Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCAmelCase :Union[bool, Tuple[bool]] = False
_UpperCAmelCase :Tuple[int] = (320, 640, 1280, 1280)
_UpperCAmelCase :int = 2
_UpperCAmelCase :Union[int, Tuple[int]] = 8
_UpperCAmelCase :Optional[Union[int, Tuple[int]]] = None
_UpperCAmelCase :int = 1280
_UpperCAmelCase :float = 0.0
_UpperCAmelCase :bool = False
_UpperCAmelCase :jnp.dtype = jnp.floataa
_UpperCAmelCase :bool = True
_UpperCAmelCase :int = 0
_UpperCAmelCase :str = "rgb"
_UpperCAmelCase :Tuple[int] = (16, 32, 96, 256)
def UpperCAmelCase__ ( self : int , snake_case__ : jax.random.KeyArray ):
# init input tensors
lowerCamelCase_ : str =(1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase_ : List[Any] =jnp.zeros(snake_case__ , dtype=jnp.floataa )
lowerCamelCase_ : int =jnp.ones((1,) , dtype=jnp.intaa )
lowerCamelCase_ : Union[str, Any] =jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCamelCase_ : Optional[int] =(1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCamelCase_ : Any =jnp.zeros(snake_case__ , dtype=jnp.floataa )
lowerCamelCase_ , lowerCamelCase_ : Any =jax.random.split(snake_case__ )
lowerCamelCase_ : Tuple ={"params": params_rng, "dropout": dropout_rng}
return self.init(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )["params"]
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : Union[str, Any] =self.block_out_channels
lowerCamelCase_ : Optional[int] =block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase_ : int =self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase_ : Any =nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCamelCase_ : Union[str, Any] =FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCamelCase_ : List[Any] =FlaxTimestepEmbedding(snake_case__ , dtype=self.dtype )
lowerCamelCase_ : List[str] =FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCamelCase_ : Optional[int] =self.only_cross_attention
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase_ : str =(only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase_ : Any =(num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase_ : Optional[int] =[]
lowerCamelCase_ : Optional[Any] =[]
lowerCamelCase_ : str =block_out_channels[0]
lowerCamelCase_ : str =nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase_ : Union[str, Any] =output_channel
lowerCamelCase_ : Tuple =block_out_channels[i]
lowerCamelCase_ : List[Any] =i == len(snake_case__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase_ : Tuple =FlaxCrossAttnDownBlockaD(
in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowerCamelCase_ : Union[str, Any] =FlaxDownBlockaD(
in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case__ )
for _ in range(self.layers_per_block ):
lowerCamelCase_ : List[Any] =nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
if not is_final_block:
lowerCamelCase_ : Any =nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
lowerCamelCase_ : List[str] =down_blocks
lowerCamelCase_ : int =controlnet_down_blocks
# mid
lowerCamelCase_ : int =block_out_channels[-1]
lowerCamelCase_ : str =FlaxUNetMidBlockaDCrossAttn(
in_channels=snake_case__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCamelCase_ : List[str] =nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Tuple , snake_case__ : Dict , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : float = 1.0 , snake_case__ : bool = True , snake_case__ : bool = False , ):
lowerCamelCase_ : int =self.controlnet_conditioning_channel_order
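# Checkpoints trained on BGR conditioning images store "bgr" here; flip the channel axis so the embedding always sees the expected order.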
if channel_order == "bgr":
lowerCamelCase_ : Optional[Any] =jnp.flip(snake_case__ , axis=1 )
# 1. time
if not isinstance(snake_case__ , jnp.ndarray ):
lowerCamelCase_ : Dict =jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase_ : Any =timesteps.astype(dtype=jnp.floataa )
lowerCamelCase_ : Optional[Any] =jnp.expand_dims(snake_case__ , 0 )
lowerCamelCase_ : Any =self.time_proj(snake_case__ )
lowerCamelCase_ : Union[str, Any] =self.time_embedding(snake_case__ )
# 2. pre-process
lowerCamelCase_ : List[str] =jnp.transpose(snake_case__ , (0, 2, 3, 1) )
lowerCamelCase_ : Union[str, Any] =self.conv_in(snake_case__ )
lowerCamelCase_ : List[str] =jnp.transpose(snake_case__ , (0, 2, 3, 1) )
lowerCamelCase_ : str =self.controlnet_cond_embedding(snake_case__ )
sample += controlnet_cond
# 3. down
lowerCamelCase_ : List[str] =(sample,)
for down_block in self.down_blocks:
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =down_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train )
else:
lowerCamelCase_ , lowerCamelCase_ : Optional[int] =down_block(snake_case__ , snake_case__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCamelCase_ : Optional[int] =self.mid_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train )
# 5. controlnet blocks
lowerCamelCase_ : Dict =()
for down_block_res_sample, controlnet_block in zip(snake_case__ , self.controlnet_down_blocks ):
lowerCamelCase_ : Dict =controlnet_block(snake_case__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase_ : List[Any] =controlnet_down_block_res_samples
lowerCamelCase_ : Tuple =self.controlnet_mid_block(snake_case__ )
# 6. scaling
lowerCamelCase_ : Dict =[sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case__ , mid_block_res_sample=snake_case__ )
| 144 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :torch.FloatTensor
class lowercase__ ( snake_case__, snake_case__ ):
@register_to_config
def __init__( self : Optional[int] , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : Tuple[str] = ("DownEncoderBlock2D",) , snake_case__ : Tuple[str] = ("UpDecoderBlock2D",) , snake_case__ : Tuple[int] = (64,) , snake_case__ : int = 1 , snake_case__ : str = "silu" , snake_case__ : int = 3 , snake_case__ : int = 32 , snake_case__ : int = 256 , snake_case__ : int = 32 , snake_case__ : Optional[int] = None , snake_case__ : float = 0.18_215 , snake_case__ : str = "group" , ):
super().__init__()
# pass init params to Encoder
lowerCamelCase_ : List[str] =Encoder(
in_channels=snake_case__ , out_channels=snake_case__ , down_block_types=snake_case__ , block_out_channels=snake_case__ , layers_per_block=snake_case__ , act_fn=snake_case__ , norm_num_groups=snake_case__ , double_z=snake_case__ , )
lowerCamelCase_ : Union[str, Any] =vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCamelCase_ : List[Any] =nn.Convad(snake_case__ , snake_case__ , 1 )
lowerCamelCase_ : int =VectorQuantizer(snake_case__ , snake_case__ , beta=0.25 , remap=snake_case__ , sane_index_shape=snake_case__ )
lowerCamelCase_ : int =nn.Convad(snake_case__ , snake_case__ , 1 )
# pass init params to Decoder
lowerCamelCase_ : Union[str, Any] =Decoder(
in_channels=snake_case__ , out_channels=snake_case__ , up_block_types=snake_case__ , block_out_channels=snake_case__ , layers_per_block=snake_case__ , act_fn=snake_case__ , norm_num_groups=snake_case__ , norm_type=snake_case__ , )
@apply_forward_hook
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : torch.FloatTensor , snake_case__ : bool = True ):
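# Encode to latents: run the encoder followed by the 1x1 quant conv; quantization itself is applied later, in decode.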
lowerCamelCase_ : int =self.encoder(snake_case__ )
lowerCamelCase_ : Union[str, Any] =self.quant_conv(snake_case__ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=snake_case__ )
@apply_forward_hook
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : torch.FloatTensor , snake_case__ : bool = False , snake_case__ : bool = True ):
# also go through quantization layer
if not force_not_quantize:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict =self.quantize(snake_case__ )
else:
lowerCamelCase_ : List[Any] =h
lowerCamelCase_ : List[Any] =self.post_quant_conv(snake_case__ )
lowerCamelCase_ : Dict =self.decoder(snake_case__ , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case__ )
def UpperCAmelCase__ ( self : Any , snake_case__ : torch.FloatTensor , snake_case__ : bool = True ):
lowerCamelCase_ : Dict =sample
lowerCamelCase_ : Optional[Any] =self.encode(snake_case__ ).latents
lowerCamelCase_ : str =self.decode(snake_case__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case__ )
| 144 | 1 |
def UpperCamelCase ( __lowercase : Optional[int] ):
'''simple docstring'''
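# Knuth-Morris-Pratt prefix function: prefix_result[i] is the length of the longest proper prefix of input_string[:i + 1] that is also a suffix of it.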
A_ : Optional[int] = [0] * len(__lowerCAmelCase )
for i in range(1 ,len(__lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
A_ : Dict = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
A_ : Optional[int] = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
A_ : Dict = j
return prefix_result
def UpperCamelCase ( __lowercase : Any ):
'''simple docstring'''
return max(prefix_function(__lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 | from __future__ import annotations
import requests
_UpperCAmelCase = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def UpperCamelCase ( __lowercase : str ,__lowercase : int = 1 ,__lowercase : str = "new" ,__lowercase : list | None = None ):
'''simple docstring'''
A_ : Tuple = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__lowercase ) - valid_terms ) ):
A_ : int = f'''Invalid search term: {invalid_search_terms}'''
raise ValueError(__lowercase )
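# Reddit rate-limits generic user agents aggressively (HTTP 429), hence the custom User-agent header below.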
A_ : Optional[int] = requests.get(
f'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' ,headers={'User-agent': 'A random string'} ,)
if response.status_code == 4_29:
raise requests.HTTPError
A_ : Optional[Any] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__lowercase )}
A_ : Union[str, Any] = {}
for id_ in range(__lowercase ):
A_ : List[str] = {
item: data['data']['children'][id_]['data'][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited. Try again after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 192 | 0 |
'''simple docstring'''
import pprint
import requests
__a: List[Any] = '''https://zenquotes.io/api'''
def __UpperCamelCase ( ):
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def __UpperCamelCase ( ):
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
__a: Union[str, Any] = random_quotes()
pprint.pprint(response)
| 198 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0.2 , lowercase=0.2 ):
A_ : str = bp_numa
A_ : Optional[int] = bp_numa
A_ : Optional[Any] = bp_numa
A_ : str = conva_get[:2]
A_ : Union[str, Any] = conva_get[2]
A_ : Union[str, Any] = size_pa
A_ : List[str] = rate_w
A_ : Dict = rate_t
A_ : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
A_ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A_ : Tuple = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A_ : List[Any] = -2 * np.random.rand(self.conva[1] ) + 1
A_ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
A_ : Any = -2 * np.random.rand(self.num_bpa ) + 1
def _a (self , lowercase ):
# save model dict with pickle
A_ : Union[str, Any] = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowercase , """wb""" ) as f:
pickle.dump(lowercase , lowercase )
print(F'Model saved: {save_path}' )
@classmethod
def _a (cls , lowercase ):
# read saved model
with open(lowercase , """rb""" ) as f:
A_ : Optional[int] = pickle.load(lowercase ) # noqa: S301
A_ : Optional[int] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
A_ : Tuple = model_dic.get("""size_pooling1""" )
A_ : Optional[Any] = model_dic.get("""num_bp1""" )
A_ : List[str] = model_dic.get("""num_bp2""" )
A_ : Dict = model_dic.get("""num_bp3""" )
A_ : Tuple = model_dic.get("""rate_weight""" )
A_ : List[Any] = model_dic.get("""rate_thre""" )
# create model instance
A_ : List[str] = CNN(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
# modify model parameter
A_ : int = model_dic.get("""w_conv1""" )
A_ : str = model_dic.get("""wkj""" )
A_ : str = model_dic.get("""vji""" )
A_ : int = model_dic.get("""thre_conv1""" )
A_ : Union[str, Any] = model_dic.get("""thre_bp2""" )
A_ : List[Any] = model_dic.get("""thre_bp3""" )
return conv_ins
def _a (self , lowercase ):
return 1 / (1 + np.exp(-1 * x ))
def _a (self , lowercase ):
return round(lowercase , 3 )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase ):
# convolution process
A_ : Dict = convs[0]
A_ : Any = convs[1]
A_ : Tuple = np.shape(lowercase )[0]
# get the data slice of original image data, data_focus
A_ : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , lowercase ):
A_ : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowercase )
# calculate the feature map of every single kernel, and save it as a list of matrices
A_ : int = []
A_ : List[str] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowercase ):
A_ : List[Any] = []
for i_focus in range(len(lowercase ) ):
A_ : List[str] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowercase ) )
A_ : Tuple = np.asmatrix(lowercase ).reshape(
lowercase , lowercase )
data_featuremap.append(lowercase )
# expand the data slice to one dimension
A_ : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowercase ) )
A_ : Dict = np.asarray(lowercase )
return focus_list, data_featuremap
def _a (self , lowercase , lowercase , lowercase="average_pool" ):
# pooling process
A_ : Union[str, Any] = len(featuremaps[0] )
A_ : str = int(size_map / size_pooling )
A_ : List[str] = []
for i_map in range(len(lowercase ) ):
A_ : Any = featuremaps[i_map]
A_ : Any = []
for i_focus in range(0 , lowercase , lowercase ):
for j_focus in range(0 , lowercase , lowercase ):
A_ : Tuple = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowercase ) )
A_ : List[str] = np.asmatrix(lowercase ).reshape(lowercase , lowercase )
featuremap_pooled.append(lowercase )
return featuremap_pooled
def _a (self , lowercase ):
# expand three-dimensional data to a one-dimensional list
A_ : List[Any] = []
for i in range(len(lowercase ) ):
A_ : Tuple = np.shape(data[i] )
A_ : str = data[i].reshape(1 , shapes[0] * shapes[1] )
A_ : Tuple = data_listed.getA().tolist()[0]
data_expanded.extend(lowercase )
A_ : Optional[Any] = np.asarray(lowercase )
return data_expanded
def _a (self , lowercase ):
# expand a matrix to a one-dimensional list
A_ : str = np.asarray(lowercase )
A_ : Any = np.shape(lowercase )
A_ : int = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase ):
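# Up-sample the pooled gradients back to feature-map size (each pooled cell shares its gradient across its pooling window) and apply the sigmoid derivative.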
A_ : List[str] = []
A_ : Union[str, Any] = 0
for i_map in range(lowercase ):
A_ : Union[str, Any] = np.ones((size_map, size_map) )
for i in range(0 , lowercase , lowercase ):
for j in range(0 , lowercase , lowercase ):
A_ : str = pd_pool[
i_pool
]
A_ : Optional[Any] = i_pool + 1
A_ : Union[str, Any] = np.multiply(
lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowercase )
return pd_all
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=bool ):
# model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowercase )) )
print((""" - - Shape: Teach_Data """, np.shape(lowercase )) )
A_ : Optional[Any] = 0
A_ : Dict = []
A_ : List[Any] = 10000
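# Train until the MSE drops below error_accuracy or the n_repeat epoch budget is exhausted.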
while rp < n_repeat and mse >= error_accuracy:
A_ : List[Any] = 0
print(F'-------------Learning Time {rp}--------------' )
for p in range(len(lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
A_ : Optional[Any] = np.asmatrix(datas_train[p] )
A_ : str = np.asarray(datas_teach[p] )
A_, A_ : Dict = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A_ : Any = self.pooling(lowercase , self.size_poolinga )
A_ : int = np.shape(lowercase )
A_ : Union[str, Any] = self._expand(lowercase )
A_ : Dict = data_bp_input
A_ : int = np.dot(lowercase , self.vji.T ) - self.thre_bpa
A_ : Any = self.sig(lowercase )
A_ : Optional[int] = np.dot(lowercase , self.wkj.T ) - self.thre_bpa
A_ : List[str] = self.sig(lowercase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
A_ : Optional[Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(lowercase , (1 - bp_outa) ) )
A_ : Tuple = np.multiply(
np.dot(lowercase , self.wkj ) , np.multiply(lowercase , (1 - bp_outa) ) )
A_ : Union[str, Any] = np.dot(lowercase , self.vji )
A_ : int = pd_i_all / (self.size_poolinga * self.size_poolinga)
A_ : str = pd_conva_pooled.T.getA().tolist()
A_ : List[Any] = self._calculate_gradient_from_pool(
lowercase , lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
A_ : int = self._expand_mat(pd_conva_all[k_conv] )
A_ : Any = self.rate_weight * np.dot(lowercase , lowercase )
A_ : Union[str, Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
A_ : Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
A_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
A_ : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
A_ : List[Any] = self.thre_bpa - pd_k_all * self.rate_thre
A_ : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
A_ : Optional[int] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
A_ : List[Any] = rp + 1
A_ : Union[str, Any] = error_count / patterns
all_mse.append(lowercase )
def draw_error():
A_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowercase , """+-""" )
plt.plot(lowercase , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowercase , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F' - - Mse: {mse:.6f}') )
if draw_e:
draw_error()
return mse
def _a (self , lowercase ):
# model prediction
A_ : Tuple = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowercase )) )
for p in range(len(lowercase ) ):
A_ : int = np.asmatrix(datas_test[p] )
A_, A_ : str = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A_ : Dict = self.pooling(lowercase , self.size_poolinga )
A_ : Tuple = self._expand(lowercase )
A_ : int = data_bp_input
A_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
A_ : List[str] = self.sig(lowercase )
A_ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
A_ : List[Any] = self.sig(lowercase )
produce_out.extend(bp_outa.getA().tolist() )
A_ : Any = [list(map(self.do_round , lowercase ) ) for each in produce_out]
return np.asarray(lowercase )
def _a (self , lowercase ):
# return the image data after the convolution process so we can inspect it
A_ : Optional[Any] = np.asmatrix(lowercase )
A_, A_ : Optional[Any] = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A_ : Union[str, Any] = self.pooling(lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass | 206 | 0 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _SCREAMING_SNAKE_CASE( A ):
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,**SCREAMING_SNAKE_CASE__ ,) -> int:
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE__ ,split=SCREAMING_SNAKE_CASE__ ,features=SCREAMING_SNAKE_CASE__ ,cache_dir=SCREAMING_SNAKE_CASE__ ,keep_in_memory=SCREAMING_SNAKE_CASE__ ,streaming=SCREAMING_SNAKE_CASE__ ,num_proc=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
__SCREAMING_SNAKE_CASE :int = path_or_paths if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) else {self.split: path_or_paths}
__SCREAMING_SNAKE_CASE :Optional[Any] = Text(
cache_dir=SCREAMING_SNAKE_CASE__ ,data_files=SCREAMING_SNAKE_CASE__ ,features=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
if self.streaming:
__SCREAMING_SNAKE_CASE :List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__SCREAMING_SNAKE_CASE :Union[str, Any] = None
__SCREAMING_SNAKE_CASE :str = None
__SCREAMING_SNAKE_CASE :Any = None
__SCREAMING_SNAKE_CASE :str = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ ,download_mode=SCREAMING_SNAKE_CASE__ ,verification_mode=SCREAMING_SNAKE_CASE__ ,base_path=SCREAMING_SNAKE_CASE__ ,num_proc=self.num_proc ,)
__SCREAMING_SNAKE_CASE :Dict = self.builder.as_dataset(
split=self.split ,verification_mode=SCREAMING_SNAKE_CASE__ ,in_memory=self.keep_in_memory )
return dataset
| 359 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
lowerCamelCase_ = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
"emoji": True,
},
}
]
lowerCamelCase_ = 0
for log in Path().glob("*.log"):
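# Each log holds one JSON record per line; tally failures per log file and collect (test, duration, suite) rows for the report.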
lowerCamelCase_ = 0
with open(log, "r") as f:
for line in f:
lowerCamelCase_ = json.loads(line)
if line.get("nodeid", "") != "":
lowerCamelCase_ = line["nodeid"]
if line.get("duration", None) is not None:
lowerCamelCase_ = f'{line["duration"]:.4f}'
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase_ = []
log.unlink()
lowerCamelCase_ = ""
lowerCamelCase_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase_ = []
lowerCamelCase_ = {}
for test in failed_tests:
lowerCamelCase_ = test[0].split("::")
lowerCamelCase_ = data[0].split("/")[-1]
if data[0] not in filesafailed:
lowerCamelCase_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase_ = [test[0] for test in failed_table]
lowerCamelCase_ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase_ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase_ = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_0_0_0:
lowerCamelCase_ = "Too many failed tests, please see the full report in the Action results."
lowerCamelCase_ = len(err) + 1_0
lowerCamelCase_ = message[: 3_0_0_0 - offset] + f'\n...\n```\n{err}'
print(f'### {message}')
else:
lowerCamelCase_ = "No failed tests! 🤗"
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
lowerCamelCase_ = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
lowerCamelCase_ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
lowerCamelCase_ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
lowerCamelCase_ = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
lowerCamelCase_ = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
lowerCamelCase_ = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase_ = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase_ = row[0]
else:
lowerCamelCase_ = ""
lowerCamelCase_ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
) | 239 | 0 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowercase_ : Optional[Any] = VideoClassificationPipeline(model=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE , top_k=2 )
lowercase_ : Optional[Any] = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for example in examples:
lowercase_ : Tuple = video_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
] , )
@require_torch
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
lowercase_ : str = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
lowercase_ : List[Any] = pipeline(
'''video-classification''' , model=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , frame_sampling_rate=4 )
lowercase_ : List[Any] = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowercase_ : Optional[int] = video_classifier(__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , )
lowercase_ : Dict = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def _snake_case ( self ):
"""simple docstring"""
pass
| 93 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : Optional[Any] = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """audio-spectrogram-transformer"""
def __init__( self : int , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : int=1_2 , __UpperCamelCase : List[Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1e-12 , __UpperCamelCase : Optional[Any]=1_6 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : int=1_0 , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : str=1_0_2_4 , __UpperCamelCase : Optional[Any]=1_2_8 , **__UpperCamelCase : Any , )->Tuple:
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = patch_size
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = frequency_stride
_UpperCAmelCase = time_stride
_UpperCAmelCase = max_length
_UpperCAmelCase = num_mel_bins
| 260 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Any = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 120 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Optional[Any] = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 120 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def _snake_case ( lowerCAmelCase : Dict=None ):
"""simple docstring"""
if subparsers is not None:
SCREAMING_SNAKE_CASE_ : int = subparsers.add_parser("env" )
else:
SCREAMING_SNAKE_CASE_ : Dict = argparse.ArgumentParser("Accelerate env command" )
parser.add_argument(
"--config_file" , default=lowerCAmelCase , help="The config file to use for the default values in the launching script." )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = torch.__version__
SCREAMING_SNAKE_CASE_ : List[str] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE_ : Optional[Any] = is_xpu_available()
SCREAMING_SNAKE_CASE_ : str = is_npu_available()
SCREAMING_SNAKE_CASE_ : int = "Not found"
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] = load_config_from_file(args.config_file ).to_dict()
SCREAMING_SNAKE_CASE_ : str = {
"`Accelerate` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Numpy version": np.__version__,
"PyTorch version (GPU?)": f'{pt_version} ({pt_cuda_available})',
"PyTorch XPU available": str(lowerCAmelCase ),
"PyTorch NPU available": str(lowerCAmelCase ),
"System RAM": f'{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB',
}
if pt_cuda_available:
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cuda.get_device_name()
print("\nCopy-and-paste the text below in your GitHub issue\n" )
print("\n".join([f'- {prop}: {val}' for prop, val in info.items()] ) )
print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" )
SCREAMING_SNAKE_CASE_ : Optional[int] = (
"\n".join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(lowerCAmelCase , lowerCAmelCase )
else f'\t{accelerate_config}'
)
print(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = accelerate_config
return info
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = env_command_parser()
SCREAMING_SNAKE_CASE_ : Optional[Any] = parser.parse_args()
env_command(lowerCAmelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
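# When wired into the Accelerate CLI, this module backs the `accelerate env`
# subcommand: env_command_parser builds the parser, env_command prints the
# report assembled above, and --config_file optionally overrides the default
# config location.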
| 18 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
# The text reader always yields a single "text" column whose default dtype is "string"
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
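# Taken together, these parametrized tests exercise the TextDatasetReader twice --
# once for single Datasets and once for DatasetDict inputs -- across in-memory vs
# cached reads, feature overrides, and split selection (plus str-vs-list paths in
# the single-Dataset case).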
| 18 | 1 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset)
def A_( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.')
for i, dataset in enumerate(A):
if not isinstance(A , (Dataset, IterableDataset)):
if isinstance(A , (DatasetDict, IterableDatasetDict)):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'is an empty dataset dictionary.')
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(A)}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A))}\']''')
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A).__name__}.''')
if i == 0:
dataset_type , other_type = (
(Dataset, IterableDataset) if isinstance(dataset , Dataset) else (IterableDataset, Dataset)
)
elif not isinstance(dataset , dataset_type):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''')
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''')
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A , A , A , info=A , split=A , stopping_strategy=A)
else:
return _interleave_iterable_datasets(
A , A , A , info=A , split=A , stopping_strategy=A)
def A_( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ):
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.')
for i, dataset in enumerate(A):
if not isinstance(A , (Dataset, IterableDataset)):
if isinstance(A , (DatasetDict, IterableDatasetDict)):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'is an empty dataset dictionary.')
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(A)}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A))}\']''')
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A).__name__}.''')
if i == 0:
dataset_type , other_type = (
(Dataset, IterableDataset) if isinstance(dataset , Dataset) else (IterableDataset, Dataset)
)
elif not isinstance(dataset , dataset_type):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''')
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A , info=A , split=A , axis=A)
else:
return _concatenate_iterable_datasets(A , info=A , split=A , axis=A)
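# Minimal usage sketch (a hedged example; these helpers surface publicly as
# datasets.interleave_datasets / datasets.concatenate_datasets):
# d1 = Dataset.from_dict({"a": [0, 1, 2]})
# d2 = Dataset.from_dict({"a": [10, 11, 12]})
# mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
# "first_exhausted" stops as soon as one source runs out of samples, while
# "all_exhausted" oversamples until every source has been fully consumed.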
| 251 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig):
pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = """nezha"""
def __init__( self , vocab_size=21128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , )-> None:
'''simple docstring'''
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.max_relative_position = max_relative_position
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
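# Unlike BERT, NEZHA relies on functional relative position encodings in
# self-attention, which is why this config carries max_relative_position
# (default 64) alongside the usual max_position_embeddings.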
| 251 | 1 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def a_ ( vl: list , wt: list , w: int , n: int ) -> float:
"""simple docstring"""
r = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
vl, wt = [i[0] for i in r], [i[1] for i in r]
acc = list(accumulate(wt ) )
k = bisect(acc , w )
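# k counts how many of the ratio-sorted items fit entirely within capacity w;
# the return below takes those k items whole, plus a proportional share of
# item k's value for the leftover capacity (greedy fractional knapsack).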
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
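# Worked example (arguments are values, weights, capacity, item count):
# a_([60, 100, 120], [10, 20, 30], 50, 3) -> 160 + (50 - 30) * 120 / 30 = 240.0,
# i.e. the two best value/weight ratios taken whole plus 20/30 of the third item.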
| 75 |
'''simple docstring'''
a_ : Any = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
a_ : Any = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
a_ : Optional[Any] = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
a_ : str = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
a_ : Optional[int] = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
a_ : Dict = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
a_ : Tuple = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
a_ : Any = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
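# Each constant above is a hand-specified diffusion timestep schedule, strictly
# decreasing from 999 to 0; the different lists appear to correspond to
# different sampling budgets (from roughly 27 up to ~180 steps per run).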
| 75 | 1 |
"""simple docstring"""
def lowercase (n : int = 10_00 ) -> int:
a = 3
result = 0
while a < n:
# A multiple of 15 is already a multiple of 3, so the original unreachable
# `elif a % 15 == 0: result -= a` branch is dropped here.
if a % 3 == 0 or a % 5 == 0:
result += a
a += 1
return result
if __name__ == "__main__":
print(f'''{lowercase() = }''')
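# Cross-check via inclusion-exclusion for the default n = 1000:
# 166833 (multiples of 3) + 99500 (multiples of 5) - 33165 (multiples of 15)
# = 233168, which matches what the loop above returns.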
| 353 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str ) -> Any:
# Load configuration defined in the metadata file
with open(SCREAMING_SNAKE_CASE_ ) as metadata_file:
SCREAMING_SNAKE_CASE = json.load(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE_ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['module']
# Load the entity vocab file
SCREAMING_SNAKE_CASE = load_original_entity_vocab(SCREAMING_SNAKE_CASE_ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE = AddedToken('<ent>' , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = AddedToken('<ent2>' , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'tokenizer_config.json' ) , 'r' ) as f:
SCREAMING_SNAKE_CASE = json.load(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = 'MLukeTokenizer'
with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(['@'] )[0]
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(['#'] )[0]
SCREAMING_SNAKE_CASE = state_dict['embeddings.word_embeddings.weight']
SCREAMING_SNAKE_CASE = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE = state_dict[bias_name]
SCREAMING_SNAKE_CASE = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE = F'encoder.layer.{layer_index}.attention.self.'
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE = state_dict['entity_embeddings.entity_embeddings.weight']
SCREAMING_SNAKE_CASE = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE = state_dict['entity_predictions.bias']
SCREAMING_SNAKE_CASE = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE_ ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
SCREAMING_SNAKE_CASE = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
SCREAMING_SNAKE_CASE = state_dict[key]
else:
SCREAMING_SNAKE_CASE = state_dict[key]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
if set(SCREAMING_SNAKE_CASE_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(SCREAMING_SNAKE_CASE_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , task='entity_classification' )
SCREAMING_SNAKE_CASE = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
SCREAMING_SNAKE_CASE = (0, 9)
SCREAMING_SNAKE_CASE = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE = torch.Size((1, 33, 7_68) )
SCREAMING_SNAKE_CASE = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE = torch.Size((1, 1, 7_68) )
SCREAMING_SNAKE_CASE = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = 'Tokyo is the capital of <mask>.'
SCREAMING_SNAKE_CASE = (24, 30)
SCREAMING_SNAKE_CASE = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = encoding['input_ids'][0].tolist()
SCREAMING_SNAKE_CASE = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
SCREAMING_SNAKE_CASE = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(SCREAMING_SNAKE_CASE_ ) )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> int:
SCREAMING_SNAKE_CASE = ['[MASK]', '[PAD]', '[UNK]']
SCREAMING_SNAKE_CASE = [json.loads(SCREAMING_SNAKE_CASE_ ) for line in open(SCREAMING_SNAKE_CASE_ )]
SCREAMING_SNAKE_CASE = {}
for entry in data:
SCREAMING_SNAKE_CASE = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE = entity_id
break
SCREAMING_SNAKE_CASE = F'{language}:{entity_name}'
SCREAMING_SNAKE_CASE = entity_id
return new_mapping
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
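# Note: although the --entity_vocab_path help text says ".tsv", the
# load_original_entity_vocab helper above parses the file as JSON lines
# (one json.loads call per line), so a JSON-lines entity vocab is expected.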
| 38 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 214 |
from __future__ import annotations
def generate_all_permutations ( sequence: list[int | str] ) -> None:
'''simple docstring'''
create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree ( sequence: list[int | str] , current_sequence: list[int | str] , index: int , index_used: list[int] , ) -> None:
'''simple docstring'''
if index == len(sequence ):
print(current_sequence )
return
for i in range(len(sequence ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
index_used[i] = True
create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
current_sequence.pop()
index_used[i] = False
sequence : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
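# The two driver calls above print all 4! = 24 orderings of [3, 1, 2, 4] and
# all 3! = 6 orderings of ["A", "B", "C"]; index_used marks elements already
# placed so each element appears exactly once per permutation.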
| 117 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=A ):
'''simple docstring'''
lowerCAmelCase__ = ["""onnx"""]
def __init__( self : List[str] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
requires_backends(self , ['onnx'])
@classmethod
def __lowerCamelCase ( cls : str , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
requires_backends(cls , ['onnx'])
@classmethod
def __lowerCamelCase ( cls : Optional[Any] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : int):
'''simple docstring'''
requires_backends(cls , ['onnx'])
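# Dummy-object pattern: this placeholder stands in for the real ONNX-backed
# class when the onnx backend is missing, and requires_backends raises an
# informative import error only when the class is instantiated or its
# classmethods are invoked, so plain module imports keep working without onnx.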
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 48 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Dict = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=False ) -> Union[str, Any]:
lowerCamelCase__ : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase__ : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Tuple:
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase__ : Optional[int] = ''
else:
lowerCamelCase__ : List[str] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ : str = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowerCamelCase__ : List[str] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : int = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase__ : str = in_proj_bias[: config.hidden_size]
lowerCamelCase__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ : str = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ : List[Any] = in_proj_bias[-config.hidden_size :]
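# timm checkpoints store query/key/value as one fused qkv projection of shape
# (3 * hidden_size, hidden_size); the slices above split it back into the
# separate q/k/v weights and biases that the HF ViT layout expects.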
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : int = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__A , __A )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
lowerCamelCase__ : Union[str, Any] = dct.pop(__A )
lowerCamelCase__ : List[str] = val
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
lowerCamelCase__ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : Optional[Any] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
lowerCamelCase__ : Dict = ViTConfig()
lowerCamelCase__ : Union[str, Any] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowerCamelCase__ : Dict = True
lowerCamelCase__ : Optional[Any] = int(vit_name[-12:-10] )
lowerCamelCase__ : Optional[Any] = int(vit_name[-9:-6] )
else:
lowerCamelCase__ : Optional[int] = 1000
lowerCamelCase__ : str = 'huggingface/label-files'
lowerCamelCase__ : List[Any] = 'imagenet-1k-id2label.json'
lowerCamelCase__ : List[Any] = json.load(open(hf_hub_download(__A , __A , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : Dict = {int(__A ): v for k, v in idalabel.items()}
lowerCamelCase__ : Optional[int] = idalabel
lowerCamelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : Optional[int] = int(vit_name[-6:-4] )
lowerCamelCase__ : Optional[int] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
lowerCamelCase__ : Any = 192
lowerCamelCase__ : str = 768
lowerCamelCase__ : Tuple = 12
lowerCamelCase__ : Any = 3
elif vit_name[9:].startswith('small' ):
lowerCamelCase__ : Dict = 384
lowerCamelCase__ : int = 1536
lowerCamelCase__ : Tuple = 12
lowerCamelCase__ : List[Any] = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
lowerCamelCase__ : Optional[int] = 768
lowerCamelCase__ : Tuple = 2304
lowerCamelCase__ : List[str] = 8
lowerCamelCase__ : List[str] = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
lowerCamelCase__ : int = 1024
lowerCamelCase__ : Any = 4096
lowerCamelCase__ : Tuple = 24
lowerCamelCase__ : int = 16
elif vit_name[4:].startswith('huge' ):
lowerCamelCase__ : Any = 1280
lowerCamelCase__ : int = 5120
lowerCamelCase__ : str = 32
lowerCamelCase__ : List[Any] = 16
# load original model from timm
lowerCamelCase__ : List[str] = timm.create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase__ : int = timm_model.state_dict()
if base_model:
remove_classification_head_(__A )
lowerCamelCase__ : Optional[int] = create_rename_keys(__A , __A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_q_k_v(__A , __A , __A )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase__ : Tuple = ViTModel(__A ).eval()
else:
lowerCamelCase__ : List[str] = ViTForImageClassification(__A ).eval()
model.load_state_dict(__A )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowerCamelCase__ : Union[str, Any] = DeiTImageProcessor(size=config.image_size )
else:
lowerCamelCase__ : Dict = ViTImageProcessor(size=config.image_size )
lowerCamelCase__ : Dict = image_processor(images=prepare_img() , return_tensors='pt' )
lowerCamelCase__ : Optional[int] = encoding['pixel_values']
lowerCamelCase__ : List[Any] = model(__A )
if base_model:
lowerCamelCase__ : Union[str, Any] = timm_model.forward_features(__A )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__A , outputs.pooler_output , atol=1e-3 )
else:
lowerCamelCase__ : Any = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1e-3 )
Path(__A ).mkdir(exist_ok=__A )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__A )
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
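# Example invocation (a sketch; the script file name and output path are
# placeholders, not taken from this snippet):
# python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base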
| 50 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
snake_case_ : Tuple = logging.get_logger(__name__)
def A (__A : bool , __A : bool ) -> Optional[Any]:
"""simple docstring"""
def run_func(__A : Optional[Any] ):
@wraps(__A )
def run_in_eager_mode(*__A : Dict , **__A : List[Any] ):
return func(*__A , **__A )
@wraps(__A )
@tf.function(experimental_compile=__A )
def run_in_graph_mode(*__A : Optional[Any] , **__A : Any ):
return func(*__A , **__A )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
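# The factory above returns either an eager wrapper or a tf.function-compiled
# wrapper around func; experimental_compile=use_xla additionally routes the
# graph through XLA, which is why eager mode combined with XLA is rejected.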
def A (__A : int , __A : int , __A : int ) -> ["tf.Tensor"]:
"""simple docstring"""
UpperCAmelCase_ = random.Random()
UpperCAmelCase_ = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(__A , shape=(batch_size, sequence_length) , dtype=tf.intaa )
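# Shape contract: random_input_ids(batch_size, sequence_length, vocab_size)
# returns an int32 tensor of shape (batch_size, sequence_length) whose token
# ids are drawn uniformly from [0, vocab_size - 1] as dummy benchmark input.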
class __snake_case ( a ):
UpperCAmelCase__ : TensorFlowBenchmarkArguments
UpperCAmelCase__ : PretrainedConfig
UpperCAmelCase__ : str = "TensorFlow"
@property
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return tf.__version__
def lowerCamelCase ( self : Dict , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_inference_func(_snake_case , _snake_case , _snake_case)
return self._measure_speed(_inference)
def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_train_func(_snake_case , _snake_case , _snake_case)
return self._measure_speed(_train)
def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _snake_case)
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_inference_func(_snake_case , _snake_case , _snake_case)
return self._measure_memory(_inference)
def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _snake_case)
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_train_func(_snake_case , _snake_case , _snake_case)
return self._measure_memory(_train)
def lowerCamelCase ( self : Optional[int] , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''')
UpperCAmelCase_ = (
hasattr(_snake_case , '''architectures''')
and isinstance(config.architectures , _snake_case)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ = __import__('''transformers''' , fromlist=[model_class])
UpperCAmelCase_ = getattr(_snake_case , _snake_case)
UpperCAmelCase_ = model_cls(_snake_case)
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
else:
UpperCAmelCase_ = TF_MODEL_MAPPING[config.__class__](_snake_case)
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ = config.vocab_size if hasattr(_snake_case , '''vocab_size''') else config.encoder.vocab_size
UpperCAmelCase_ = random_input_ids(_snake_case , _snake_case , _snake_case)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_forward():
return model(_snake_case , decoder_input_ids=_snake_case , training=_snake_case)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_forward():
return model(_snake_case , training=_snake_case)
UpperCAmelCase_ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''')
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''')
UpperCAmelCase_ = (
hasattr(_snake_case , '''architectures''')
and isinstance(config.architectures , _snake_case)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ = __import__('''transformers''' , fromlist=[model_class])
UpperCAmelCase_ = getattr(_snake_case , _snake_case)
UpperCAmelCase_ = model_cls(_snake_case)
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
else:
UpperCAmelCase_ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_snake_case)
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ = config.vocab_size if hasattr(_snake_case , '''vocab_size''') else config.encoder.vocab_size
UpperCAmelCase_ = random_input_ids(_snake_case , _snake_case , _snake_case)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_train():
UpperCAmelCase_ = model(_snake_case , decoder_input_ids=_snake_case , labels=_snake_case , training=_snake_case)[0]
UpperCAmelCase_ = tf.gradients(_snake_case , model.trainable_variables)
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_train():
UpperCAmelCase_ = model(_snake_case , labels=_snake_case , training=_snake_case)[0]
UpperCAmelCase_ = tf.gradients(_snake_case , model.trainable_variables)
return gradients
UpperCAmelCase_ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCamelCase ( self : Any , _snake_case : Optional[Any]):
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''')
timeit.repeat(_snake_case , repeat=1 , number=5)
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase_ = timeit.repeat(
_snake_case , repeat=self.args.repeat , number=10 , )
return min(_snake_case) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""")
def lowerCamelCase ( self : Dict , _snake_case : Callable[[], None]):
"""simple docstring"""
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''')
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''')
UpperCAmelCase_ = start_memory_tracing('''transformers''')
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''')
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''')
UpperCAmelCase_ = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''')
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase_ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
UpperCAmelCase_ = nvml.nvmlDeviceGetMemoryInfo(_snake_case)
UpperCAmelCase_ = meminfo.used
UpperCAmelCase_ = Memory(_snake_case)
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''')
UpperCAmelCase_ = None
else:
UpperCAmelCase_ = measure_peak_memory_cpu(_snake_case)
UpperCAmelCase_ = Memory(_snake_case) if isinstance(_snake_case , _snake_case) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase_ = stop_memory_tracing(_snake_case)
if memory is None:
UpperCAmelCase_ = summary.total
else:
UpperCAmelCase_ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""")
return "N/A", None
| 51 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
'''simple docstring'''
_UpperCAmelCase : Any = parent
_UpperCAmelCase : Optional[Any] = batch_size
_UpperCAmelCase : Optional[int] = seq_length
_UpperCAmelCase : Tuple = is_training
_UpperCAmelCase : Union[str, Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : Tuple = vocab_size
_UpperCAmelCase : int = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : str = num_attention_heads
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Dict = attention_probs_dropout_prob
_UpperCAmelCase : int = max_position_embeddings
_UpperCAmelCase : List[str] = type_vocab_size
_UpperCAmelCase : Optional[Any] = type_sequence_label_size
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : List[str] = num_labels
_UpperCAmelCase : str = num_choices
_UpperCAmelCase : Optional[int] = scope
_UpperCAmelCase : Union[str, Any] = self.vocab_size - 1
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : List[Any] = None
if self.use_token_type_ids:
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = None
_UpperCAmelCase : Optional[int] = None
if self.use_labels:
_UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : Dict = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , *A_ ):
'''simple docstring'''
_UpperCAmelCase : Dict = OpenAIGPTModel(config=A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : Dict = model(A_ , token_type_ids=A_ , head_mask=A_ )
_UpperCAmelCase : str = model(A_ , token_type_ids=A_ )
_UpperCAmelCase : Any = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , *A_ ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = OpenAIGPTLMHeadModel(A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : Optional[Any] = model(A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , *A_ ):
'''simple docstring'''
_UpperCAmelCase : str = OpenAIGPTDoubleHeadsModel(A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : int = model(A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , *A_ ):
'''simple docstring'''
_UpperCAmelCase : int = self.num_labels
_UpperCAmelCase : Dict = OpenAIGPTForSequenceClassification(A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : int = model(A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowercase = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_lowercase = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_lowercase = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _UpperCAmelCase ( self , A_ , A_ , A_=False ):
'''simple docstring'''
_UpperCAmelCase : Tuple = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A_ , )
_UpperCAmelCase : List[str] = inputs_dict["labels"]
_UpperCAmelCase : Tuple = inputs_dict["labels"]
_UpperCAmelCase : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A_ , )
_UpperCAmelCase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
return inputs_dict
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 371 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
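# Illustrative effect of the substitutions on a hypothetical ParlAI key:
#   "encoder.layers.0.attention.q_lin.weight" -> "encoder.layers.0.self_attn.q_proj.weight"
# (PATTERNS is applied first, then the encoder/decoder-specific norm renames below).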
def rename_state_dict_key(k ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("encoder" ):
        k = k.replace(".attn" , ".self_attn" )
        k = k.replace("norm1" , "self_attn_layer_norm" )
        k = k.replace("norm2" , "final_layer_norm" )
    elif k.startswith("decoder" ):
        k = k.replace("norm1" , "self_attn_layer_norm" )
        k = k.replace("norm2" , "encoder_attn_layer_norm" )
        k = k.replace("norm3" , "final_layer_norm" )
    return k
def rename_layernorm_keys(sd ):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("layernorm_embedding" , "layer_norm" )
        assert new_k not in sd
        sd[new_k] = v
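# Note: the helper above mutates `sd` in place; it is only invoked for normalize_before
# (Blenderbot-3B style) checkpoints in the conversion routine below.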
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    model = torch.load(checkpoint_path , map_location="cpu" )
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
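    # Example invocation (paths are illustrative, not taken from the original script):
    #   python convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py \
    #       --src_path blenderbot-model.bin --save_dir hf_blenderbot \
    #       --hf_config_json blenderbot-3b-config.json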
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 189 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__(self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config(self ):
        """simple docstring"""
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self , config , pixel_values ):
        """simple docstring"""
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
        # Output shape (b, c, h, w): the stem and the four stages each halve the spatial
        # resolution, giving an overall downsampling factor of 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self , config , pixel_values ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ) -> None:
        """simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config(self ):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
        """simple docstring"""
        return
    def test_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @unittest.skip(reason="RegNet does not use inputs_embeds" )
    def test_inputs_embeds(self ):
        """simple docstring"""
        pass
    @unittest.skip(reason="RegNet does not support input and output embeddings" )
    def test_model_common_attributes(self ):
        """simple docstring"""
        pass
    def test_forward_signature(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output(self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_jit_compilation(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                # the same traced function is run once compiled and once eagerly, and the two
                # sets of outputs are compared shape by shape
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def default_image_processor(self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self ):
        """simple docstring"""
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="np" )
        outputs = model(**inputs )
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 88 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 12_8022
FR_CODE = 12_8028
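# EN_CODE / FR_CODE are the M2M100 language-code token ids (the ids of the "__en__" and
# "__fr__" tokens) that the tokenizer prepends to source/target sequences.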
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["spm_file"] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self ):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "</s>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "<s>" )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("""Skip this test while all models are still to be uploaded.""" )
def __A ( self ):
pass
    def test_full_tokenizer(self ):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2, 3, 4, 5, 6] , )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        text = tokenizer.convert_tokens_to_string(tokens )
        self.assertEqual(text , "This is a test" )
@slow
    def test_tokenizer_integration(self ):
# fmt: off
A__ : int = {"""input_ids""": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A__ , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
    checkpoint_name = 'facebook/m2m100_418M'
    src_text = [
        'In my opinion, there are two levels of response from the French government.',
        'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
    ]
    tgt_text = [
        'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
        'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
    @classmethod
    def setUpClass(cls ):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self ):
self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 12_8063 )
    def test_get_vocab(self ):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab["<unk>"] , 3 )
        self.assertIn(self.tokenizer.get_lang_token("en" ) , vocab )
    def test_batch_encode_plus(self ):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_tokenizer_decode_ignores_language_codes(self ):
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_tokenizer_save_pretrained(self ):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_lang_token_to_id )
@require_torch
    def test_batch_fairseq_parity(self ):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="pt" )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self ):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
    def test_tokenizer_target_mode(self ):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
    def test_tokenizer_translation(self ):
        inputs = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"""input_ids""": [[12_8022, 58, 4183, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 12_8006,
} , )
| 192 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase ):
    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ) -> None:
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self ):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp(self ):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)
    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , 'do_resize'))
        self.assertTrue(hasattr(image_processing , 'size'))
        self.assertTrue(hasattr(image_processing , 'do_center_crop'))
        self.assertTrue(hasattr(image_processing , 'crop_size'))
    def test_image_processor_from_dict_with_kwargs(self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
        self.assertEqual(image_processor.size , {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84})
def _lowercase (self : Union[str, Any]) -> int:
pass
    def test_call_pil(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_numpy(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_pytorch(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 368 | """simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float , reactance: float , impedance: float ) -> dict[str, float]:
    '''
    Given any two of electrical resistance, reactance and impedance (pass 0 for the unknown
    quantity; exactly one argument must be 0), return the missing quantity, using the
    relation impedance**2 = resistance**2 + reactance**2.
    '''
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('Exactly one argument must be 0' )
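# Illustrative usage (values assumed, not from the original file): passing resistance=3,
# reactance=4, impedance=0 returns {"impedance": 5.0}, the classic 3-4-5 right triangle.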
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase ):
    '''simple docstring'''
    def setUp(self ):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor(self , **kwargs ):
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor(self , **kwargs ):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default(self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor(self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2000] )
        audio_dict = feature_extractor(audio , return_tensors="np" )
        input_processor = processor(audio=audio , return_tensors="np" )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_image_processor(self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 224, 224] )
        image_dict = image_processor(images , return_tensors="np" )
        input_processor = processor(images=images , return_tensors="np" )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_processor(self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2000] )
        images = np.ones([3, 224, 224] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names(self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 37 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
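# `_import_structure` maps each submodule to its public symbols; `_LazyModule` (registered at
# the bottom of this file) uses it to defer the heavy torch imports until first attribute access.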
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
        'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FocalNetForImageClassification',
        'FocalNetForMaskedImageModeling',
        'FocalNetBackbone',
        'FocalNetModel',
        'FocalNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 319 | 0 |
"""simple docstring"""
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = """facebook"""
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = """allenai"""
def rewrite_dict_keys(d ):
    # (1) strip the word-breaking "@@" suffix, (2) add a "</w>" word-ending symbol where the word
    # is not broken up
    da = dict((re.sub(r'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , k ), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'{k}</w>']
        da[k] = d[k]  # restore
    return da
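# Note: the special tokens <s>/<pad>/</s>/<unk> would otherwise receive a spurious "</w>"
# suffix, so the loop above deletes the suffixed entries and restores the original ids.
# Illustrative example (hypothetical vocab): {"le@@": 5, "er": 7} -> {"le": 5, "er</w>": 7}.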
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path , pytorch_dump_folder_path ):
    assert os.path.exists(fsmt_checkpoint_path )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f'Writing results to {pytorch_dump_folder_path}' )
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path )
    fsmt_folder_path = dirname(fsmt_checkpoint_path )
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
    data_name_or_path = '.'
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f'using checkpoint {checkpoint_file}' )
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path , checkpoint_file , data_name_or_path , archive_map=models , **kwargs )
    args = vars(chkpt['args']['model'] )
    src_lang = args['source_lang']
    tgt_lang = args['target_lang']
    data_root = dirname(pytorch_dump_folder_path )
    model_dir = basename(pytorch_dump_folder_path )
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path , f'dict.{src_lang}.txt' )
    tgt_dict_file = os.path.join(fsmt_folder_path , f'dict.{tgt_lang}.txt' )
    src_dict = Dictionary.load(src_dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , 'vocab-src.json' )
    print(f'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
    with open(src_vocab_file , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file )
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices )
    tgt_vocab_size = len(tgt_vocab )
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path , 'vocab-tgt.json' )
    print(f'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
    with open(tgt_vocab_file , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(tgt_vocab , ensure_ascii=False , indent=json_indent ) )
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['merges_file'] )
    for fn in ['bpecodes', 'code']:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path , fn )
        if os.path.exists(fsmt_merges_file ):
            break
    with open(fsmt_merges_file , encoding='utf-8' ) as fin:
        merges = fin.read()
    merges = re.sub(r' \d+$' , '' , merges , 0 , re.M )  # remove frequency number
    print(f'Generating {merges_file}' )
    with open(merges_file , 'w' , encoding='utf-8' ) as fout:
        fout.write(merges )
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path , 'config.json' )
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args['bpe'] == 'fastbpe', f'need to extend tokenizer to support bpe={args["bpe"]}'
    assert args['tokenizer'] == 'moses', f'need to extend tokenizer to support tokenizer={args["tokenizer"]}'
    model_conf = {
        'architectures': ['FSMTForConditionalGeneration'],
        'model_type': 'fsmt',
        'activation_dropout': args['activation_dropout'],
        'activation_function': 'relu',
        'attention_dropout': args['attention_dropout'],
        'd_model': args['decoder_embed_dim'],
        'dropout': args['dropout'],
        'init_std': 0.0_2,
        'max_position_embeddings': args['max_source_positions'],
        'num_hidden_layers': args['encoder_layers'],
        'src_vocab_size': src_vocab_size,
        'tgt_vocab_size': tgt_vocab_size,
        'langs': [src_lang, tgt_lang],
        'encoder_attention_heads': args['encoder_attention_heads'],
        'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
        'encoder_layerdrop': args['encoder_layerdrop'],
        'encoder_layers': args['encoder_layers'],
        'decoder_attention_heads': args['decoder_attention_heads'],
        'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
        'decoder_layerdrop': args['decoder_layerdrop'],
        'decoder_layers': args['decoder_layers'],
        'bos_token_id': 0,
        'pad_token_id': 1,
        'eos_token_id': 2,
        'is_encoder_decoder': True,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_all_embeddings'],
    }
    # good hparam defaults to start with
    model_conf['num_beams'] = 5
    model_conf['early_stopping'] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf['length_penalty'] = best_score_hparams[model_dir]['length_penalty']
    else:
        model_conf['length_penalty'] = 1.0
    print(f'Generating {fsmt_model_config_file}' )
    with open(fsmt_model_config_file , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        'langs': [src_lang, tgt_lang],
        'model_max_length': 1_024,
        'do_lower_case': do_lower_case,
    }
    print(f'Generating {fsmt_tokenizer_config_file}' )
    with open(fsmt_tokenizer_config_file , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )
    # model
    model = chkpt['models'][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
    # remove unneeded keys
    ignore_keys = [
        'model.model',
        'model.encoder.version',
        'model.decoder.version',
        'model.encoder_embed_tokens.weight',
        'model.decoder_embed_tokens.weight',
        'model.encoder.embed_positions._float_tensor',
        'model.decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = FSMTForConditionalGeneration(config )
    # check that it loads ok
    model_new.load_state_dict(model_state_dict , strict=False )
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(f'Generating {pytorch_weights_dump_path}' )
    torch.save(model_state_dict , pytorch_weights_dump_path )
    print('Conversion is done!' )
    print('\nLast step is to upload the files to s3' )
    print(f'cd {data_root}' )
    print(f'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
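    # Example invocation (paths are illustrative):
    #   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
    #       --fsmt_checkpoint_path fsmt/wmt19-en-ru/model4.pt --pytorch_dump_folder_path data/wmt19-en-ru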
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 361 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
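# `patch_submodule(module, "a.b.c", mock)` temporarily swaps the attribute reachable from
# `module` (e.g. "os.path.join") for `mock`; the tests below verify that every alias of the
# target is patched for the duration of the context and restored afterwards.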
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching , 'os.path.join' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = '__test_patch_submodule_builtin_mock__'
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , 'open' , mock ):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching , 'pandas.read_csv' , mock ):
        pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , 'len' , None ) is None
    with patch_submodule(_test_patching , 'len' , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = '__test_patch_submodule_start_and_stop_mock__'
    patch = patch_submodule(_test_patching , 'open' , mock )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join
    mock_join = '__test_patch_submodule_successive_join__'
    mock_dirname = '__test_patch_submodule_successive_dirname__'
    mock_rename = '__test_patch_submodule_successive_rename__'
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
        with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    # try another order
    with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
        with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , mock ):
        pass
    with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , mock ):
        pass
| 80 | 0 |
"""simple docstring"""
def nand_gate(input_1: int , input_2: int ) -> int:
    '''NAND gate: the output is 0 only when both inputs are 1.'''
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate() -> None:
    '''Exhaustively checks the NAND truth table.'''
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
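# The tuple-count trick works because (input_1, input_2).count(0) is nonzero for every input
# combination except (1, 1), which is exactly the NAND truth table.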
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 108 | """simple docstring"""
def snake_to_camel_case(input_str: str , use_pascal: bool = False ) -> str:
    if not isinstance(input_str , str ):
        msg = F'Expected string as input, found {type(input_str )}'
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = F'Expected boolean as use_pascal parameter, found {type(use_pascal )}'
        raise ValueError(msg )
    words = input_str.split('_' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 290 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def get_detr_config(model_name ):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50' )
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101' )
    else:
        raise ValueError('Model name should include either resnet50 or resnet101' )
    config = DetrConfig(use_timm_backbone=False , backbone_config=backbone_config )
    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 2_50
    else:
        config.num_labels = 91  # standard COCO detection label count
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
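# The list built below maps every original DETR checkpoint key (left) to its transformers
# counterpart (right); rename_key() further down applies the mapping entry by entry.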
def create_rename_keys(config ):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
f"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
f"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
_UpperCamelCase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
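# --- Added illustration (not part of the original conversion script) ---
# read_in_q_k_v above relies on PyTorch's nn.MultiheadAttention packing the query, key and
# value projections row-wise into a single (3*d, d) `in_proj_weight`. A minimal,
# self-contained check of that layout, assuming d = 256 as in DETR:
def _demo_in_proj_split(d: int = 256):
    in_proj_weight = torch.randn(3 * d, d)
    q_w = in_proj_weight[:d, :]
    k_w = in_proj_weight[d : 2 * d, :]
    v_w = in_proj_weight[-d:, :]
    # the three slices tile the packed matrix exactly
    assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)
    return q_w, k_w, v_w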
| 360 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
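# --- Added usage sketch (not part of the original configuration file) ---
# inputs_to_logits_ratio is the product of the feature-extractor strides; with the default
# conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) that is 5 * 2**6 = 320, i.e. one
# encoder frame per 320 raw audio samples:
def _demo_inputs_to_logits_ratio():
    config = SEWDConfig()
    assert config.inputs_to_logits_ratio == 320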
| 335 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__A =parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
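# --- Added illustration (not part of the original conversion script) ---
# load_weights copies `weight_g`/`weight_v` because the original HiFi-GAN checkpoint stores
# weight-normalized convolutions; apply_weight_norm() creates the same parametrization on
# the HF model and remove_weight_norm() bakes the result back into plain weights. A minimal
# sketch of that round trip on a bare Conv1d:
def _demo_weight_norm_roundtrip():
    conv = torch.nn.utils.weight_norm(torch.nn.Conv1d(4, 4, kernel_size=3))
    assert hasattr(conv, "weight_g") and hasattr(conv, "weight_v")
    conv = torch.nn.utils.remove_weight_norm(conv)
    assert not hasattr(conv, "weight_g")
    return conv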
| 19 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
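# --- Added worked example (not part of the original pipeline) ---
# downscale_height_and_width rounds the requested size up to a multiple of scale_factor**2
# and divides by scale_factor, giving the latent resolution the movq decoder expects:
#   768 // 64 == 12 with no remainder         -> (96, 96)
#   700 // 64 == 10 remainder 60, rounded up  -> (88, 88)
def _demo_downscale():
    assert downscale_height_and_width(768, 768, 8) == (96, 96)
    assert downscale_height_and_width(700, 700, 8) == (88, 88)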
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        hint,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
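# --- Added illustration (not part of the original pipeline) ---
# The denoising loop above runs the unet on a doubled batch (negative + positive image
# embeddings) and recombines the halves; this is plain classifier-free guidance, shown on
# dummy tensors assuming 4 latent channels and a 96x96 latent grid:
def _demo_classifier_free_guidance(guidance_scale: float = 4.0):
    noise_pred = torch.randn(2, 4, 96, 96)  # [uncond, cond] stacked along the batch dim
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    return guided  # shape (1, 4, 96, 96)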
| 140 | 0 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def __lowerCamelCase ( *a_ : Optional[int] , a_ : Optional[Union[Dict, Any]] = None , a_ : Any=True , a_ : Optional[int]=2 ) -> Any:
from .. import __version__
__SCREAMING_SNAKE_CASE :Optional[int] = take_from
__SCREAMING_SNAKE_CASE :str = ()
if not isinstance(args[0] , a_ ):
__SCREAMING_SNAKE_CASE :Optional[Any] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(a_ ).base_version ) >= version.parse(a_ ):
raise ValueError(
f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
f''' version {__version__} is >= {version_name}''' )
__SCREAMING_SNAKE_CASE :Any = None
if isinstance(a_ , a_ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(a_ ),)
__SCREAMING_SNAKE_CASE :Union[str, Any] = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(a_ , a_ ):
values += (getattr(a_ , a_ ),)
__SCREAMING_SNAKE_CASE :Optional[Any] = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__SCREAMING_SNAKE_CASE :Optional[int] = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__SCREAMING_SNAKE_CASE :Optional[Any] = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , a_ , stacklevel=a_ )
if isinstance(a_ , a_ ) and len(a_ ) > 0:
__SCREAMING_SNAKE_CASE :Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
__SCREAMING_SNAKE_CASE :Tuple = call_frame.filename
__SCREAMING_SNAKE_CASE :Tuple = call_frame.lineno
__SCREAMING_SNAKE_CASE :List[Any] = call_frame.function
__SCREAMING_SNAKE_CASE :List[Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(a_ ) == 0:
return
elif len(a_ ) == 1:
return values[0]
return values
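# --- Added usage sketch (not part of the original utility) ---
# Typical call pattern, assuming a caller phasing out a keyword argument `old_arg`: the
# helper pops it from `kwargs`, emits a FutureWarning, and hands back the popped value
# (the target version "999.0.0" here is a placeholder far in the future):
def _demo_deprecate(**kwargs):
    old_arg = deprecate("old_arg", "999.0.0", "Use `new_arg` instead.", take_from=kwargs)
    return old_arg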
| 352 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@require_torch
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = pipeline(
task='''zero-shot-audio-classification''' ,model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
__SCREAMING_SNAKE_CASE :Any = load_dataset('''ashraq/esc50''' )
__SCREAMING_SNAKE_CASE :int = dataset['''train''']['''audio'''][-1]['''array''']
__SCREAMING_SNAKE_CASE :Dict = audio_classifier(SCREAMING_SNAKE_CASE__ ,candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) ,[{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] ,)
@unittest.skip('''No models are available in TF''' )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
pass
@slow
@require_torch
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = pipeline(
task='''zero-shot-audio-classification''' ,model='''laion/clap-htsat-unfused''' ,)
# This is an audio of a dog
__SCREAMING_SNAKE_CASE :List[Any] = load_dataset('''ashraq/esc50''' )
__SCREAMING_SNAKE_CASE :Tuple = dataset['''train''']['''audio'''][-1]['''array''']
__SCREAMING_SNAKE_CASE :str = audio_classifier(SCREAMING_SNAKE_CASE__ ,candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) ,[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] ,)
__SCREAMING_SNAKE_CASE :Dict = audio_classifier([audio] * 5 ,candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) ,[
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 ,)
__SCREAMING_SNAKE_CASE :Union[str, Any] = audio_classifier(
[audio] * 5 ,candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ,batch_size=5 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) ,[
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 ,)
@unittest.skip('''No models are available in TF''' )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
pass | 239 | 0 |
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 323 |
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends

if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup

logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
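# --- Added worked example (not part of the original feature extractor) ---
# construct_xpath joins the tag path with 1-based sibling subscripts, where subscript 0
# means "only sibling with that tag name" and is omitted:
def _demo_construct_xpath():
    extractor = MarkupLMFeatureExtractor()
    xpath = extractor.construct_xpath(["html", "body", "div", "li"], [0, 0, 0, 2])
    assert xpath == "/html/body/div/li[2]"
    return xpath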
| 38 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 362 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 223 | 0 |
from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
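# --- Added check (not part of the original solution) ---
# The closed form counts lattice paths through an n x n grid as the central binomial
# coefficient C(2n, n); for n = 2 there are C(4, 2) = 6 routes:
def _demo_lattice_paths():
    assert solution(2) == 6
    assert solution(20) == 137846528820  # the published Project Euler 15 answer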
| 134 |
def solution(n: int = 1000000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
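# --- Added check (not part of the original solution) ---
# The `counters` dict memoizes chain lengths, so each walk stops as soon as it reaches a
# number seen before. Below 14 the longest Collatz chain starts at 9 (20 terms):
def _demo_collatz():
    assert solution(14) == 9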
| 48 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""",
}
class __lowercase ( a__ ):
"""simple docstring"""
_UpperCAmelCase : Optional[int] = "roc_bert"
def __init__( self : int , lowerCAmelCase__ : List[str]=3_0522 , lowerCAmelCase__ : Optional[int]=768 , lowerCAmelCase__ : int=12 , lowerCAmelCase__ : str=12 , lowerCAmelCase__ : Any=3072 , lowerCAmelCase__ : Union[str, Any]="gelu" , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : Optional[int]=512 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : Any=1E-12 , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : int="absolute" , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[int]=768 , lowerCAmelCase__ : Optional[int]=910 , lowerCAmelCase__ : Union[str, Any]=512 , lowerCAmelCase__ : int=2_4858 , lowerCAmelCase__ : int=True , **lowerCAmelCase__ : Any , ):
SCREAMING_SNAKE_CASE_: Any = vocab_size
SCREAMING_SNAKE_CASE_: Any = max_position_embeddings
SCREAMING_SNAKE_CASE_: Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE_: Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_: Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_: Dict = intermediate_size
SCREAMING_SNAKE_CASE_: Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_: Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple = initializer_range
SCREAMING_SNAKE_CASE_: Any = type_vocab_size
SCREAMING_SNAKE_CASE_: Any = layer_norm_eps
SCREAMING_SNAKE_CASE_: Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE_: Any = enable_pronunciation
SCREAMING_SNAKE_CASE_: List[Any] = enable_shape
SCREAMING_SNAKE_CASE_: List[str] = pronunciation_embed_dim
SCREAMING_SNAKE_CASE_: Optional[int] = pronunciation_vocab_size
SCREAMING_SNAKE_CASE_: str = shape_embed_dim
SCREAMING_SNAKE_CASE_: Tuple = shape_vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] = concat_input
SCREAMING_SNAKE_CASE_: Dict = position_embedding_type
SCREAMING_SNAKE_CASE_: List[str] = classifier_dropout
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
| 365 |
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
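# --- Added check (not part of the original solution) ---
# The recurrence walks the Pell-like solutions that generate the "almost equilateral"
# Heronian triangles of Project Euler 94; the first three are (5, 5, 6), (17, 17, 16) and
# (65, 65, 66), with perimeters 16, 50 and 196:
def _demo_almost_equilateral():
    assert solution(200) == 16 + 50 + 196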
| 127 | 0 |
from ..utils import DummyObject, requires_backends


class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 302 |
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal that appends node values to `res`.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
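# --- Added note (not part of the original implementation) ---
# Tree sort costs O(n log n) on average (one BST insert per element plus an in-order walk)
# but degrades to O(n**2) on sorted input. Equal keys overwrite the node value in
# Node.insert, so duplicates are silently dropped:
def _demo_tree_sort_duplicates():
    assert tree_sort([3, 1, 3, 2]) == [1, 2, 3]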
| 302 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_A = logging.get_logger(__name__)
_A = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'convnextv2'
def __init__(self , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="gelu" , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=0.0 , _lowerCamelCase=224 , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
UpperCAmelCase__ : List[str] = num_channels
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Dict = num_stages
UpperCAmelCase__ : str = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
UpperCAmelCase__ : Tuple = [3, 3, 9, 3] if depths is None else depths
UpperCAmelCase__ : List[Any] = hidden_act
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Tuple = layer_norm_eps
UpperCAmelCase__ : Optional[int] = drop_path_rate
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Any = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
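# --- Added usage sketch (not part of the original configuration file) ---
# With the default depths [3, 3, 9, 3] the backbone exposes the stem plus four named
# stages; `out_features`/`out_indices` are validated against these names:
def _demo_stage_names():
    config = ConvNextV2Config()
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]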
| 166 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def a__ ( lowerCAmelCase ) -> Tuple:
UpperCAmelCase__ : Optional[int] = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
UpperCAmelCase__ : Dict = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
UpperCAmelCase__ : int = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCAmelCase__ : Optional[int] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
UpperCAmelCase__ : Union[str, Any] = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(lowerCAmelCase )-1}""" )
if "norm" in key:
UpperCAmelCase__ : Optional[Any] = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCAmelCase__ : int = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
UpperCAmelCase__ : Union[str, Any] = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(lowerCAmelCase )-1}""" )
if "layer_norm1" in key:
UpperCAmelCase__ : Any = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
UpperCAmelCase__ : Union[str, Any] = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
UpperCAmelCase__ : int = key[key.find("""block""" ) + len("""block""" )]
UpperCAmelCase__ : List[Any] = key.replace(F"""block{idx}""" , F"""block.{int(lowerCAmelCase )-1}""" )
if "attn.q" in key:
UpperCAmelCase__ : List[Any] = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
UpperCAmelCase__ : Tuple = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
UpperCAmelCase__ : Union[str, Any] = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
UpperCAmelCase__ : int = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
UpperCAmelCase__ : List[Any] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
UpperCAmelCase__ : Optional[Any] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
UpperCAmelCase__ : Optional[Any] = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
UpperCAmelCase__ : Optional[Any] = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCAmelCase__ : List[Any] = key[key.find("""linear_c""" ) + len("""linear_c""" )]
UpperCAmelCase__ : int = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(lowerCAmelCase )-1}""" )
if "bot_conv" in key:
UpperCAmelCase__ : int = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
UpperCAmelCase__ : List[Any] = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
UpperCAmelCase__ : List[Any] = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
UpperCAmelCase__ : Optional[Any] = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
UpperCAmelCase__ : List[str] = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
UpperCAmelCase__ : int = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
UpperCAmelCase__ : Union[str, Any] = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
UpperCAmelCase__ : Optional[int] = key.replace("""module.last_layer_depth""" , """head.head""" )
UpperCAmelCase__ : Optional[Any] = value
return new_state_dict
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Dict:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCAmelCase__ : Dict = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
UpperCAmelCase__ : int = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
UpperCAmelCase__ : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
UpperCAmelCase__ : int = kv_bias[: config.hidden_sizes[i]]
UpperCAmelCase__ : int = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCAmelCase__ : List[Any] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
_A = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
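
# Example invocation (the script and checkpoint file names below are placeholders):
# python convert_glpn_to_pytorch.py \
#     --checkpoint_path ./glpn_kitti.pth \
#     --pytorch_dump_folder_path ./glpn-kitti \
#     --model_name glpn-kitti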
| 166 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_using_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 46 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Calculate the speed of sound in a fluid from its density and bulk modulus
    using the Newton-Laplace equation: c = sqrt(K / rho).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
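    # Example (illustrative, approximate values for water at room temperature):
    # speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.1e9) is roughly 1.45e3 m/s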
| 275 | 0 |
def solution(n: int = 100) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
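
# Worked check of the closed forms above, for n = 100: the square of the sum
# (which equals the sum of cubes) is 5050**2 = 25502500 and the sum of squares
# is 338350, so solution() == 25164150.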
if __name__ == "__main__":
print(f"""{solution() = }""")
| 66 |
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 66 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
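
# Usage sketch mirroring the slow test above: the pipeline is assembled from its
# components and then sampled. Step count and output type follow the test; any other
# settings are library defaults.
# unet = UNet2DModel.from_pretrained("google/ncsnpp-church-256")
# scheduler = ScoreSdeVeScheduler.from_pretrained("google/ncsnpp-church-256")
# sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
# images = sde_ve(num_inference_steps=10, output_type="numpy").images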
| 75 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
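
# Usage sketch of the preprocessing pipeline above (the image file name is a
# placeholder): with the defaults, the shortest edge is resized to 224, a 224x224
# center crop is taken, and pixel values are rescaled and normalized.
# processor = CLIPImageProcessor()
# inputs = processor(images=PIL.Image.open("cat.png"), return_tensors="pt")
# inputs["pixel_values"].shape  # (1, 3, 224, 224)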
| 80 | 0 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 369 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(self, feature_size=80, sampling_rate=16_000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
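
    # Note on the normalization above: the power spectrogram is mapped to log10 mel
    # energies, clamped to within 8 (log10 units) of the global maximum, and then
    # rescaled by (x + 4) / 4 so the features land roughly in [-1, 1].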
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
def __call__( self : Tuple , lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase : bool = True , lowercase : Optional[int] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Optional[bool] = None , lowercase : Optional[str] = "max_length" , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : Optional[bool] = None , **lowercase : Union[str, Any] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase_ :List[str] = isinstance(lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowercase_ :Optional[Any] = is_batched_numpy or (
isinstance(lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase_ :Any = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowercase , np.ndarray ):
lowercase_ :List[Any] = np.asarray(lowercase , dtype=np.floataa )
elif isinstance(lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase_ :Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase_ :Optional[int] = [np.asarray([raw_speech] ).T]
lowercase_ :int = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
lowercase_ :Tuple = self.pad(
lowercase , padding=lowercase , max_length=max_length if max_length else self.n_samples , truncation=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase_ :Union[str, Any] = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
lowercase_ :List[Any] = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
lowercase_ :Union[str, Any] = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
lowercase_ :List[str] = [self._np_extract_fbank_features(lowercase ) for waveform in input_features[0]]
if isinstance(input_features[0] , lowercase ):
lowercase_ :Tuple = [np.asarray(lowercase , dtype=np.floataa ) for feature in input_features]
else:
lowercase_ :Union[str, Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase_ :Dict = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
lowercase_ :Tuple = padded_inputs.convert_to_tensors(lowercase )
return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
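
# Usage sketch (the checkpoint name is assumed for illustration):
# fe = WhisperFeatureExtractor.from_pretrained("openai/whisper-tiny")
# feats = fe(audio_array, sampling_rate=16_000, return_tensors="np")["input_features"]
# feats.shape  # (1, 80, 3000): 80 mel bins x 3000 frames (30 s of 10 ms hops)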
| 147 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
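
# Usage sketch: instantiating the config with defaults and reading the two mapped
# attributes defined above.
# config = DeformableDetrConfig()
# config.num_attention_heads  # 8, resolved via attribute_map -> encoder_attention_heads
# config.hidden_size          # 256, resolved via attribute_map -> d_model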
| 125 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 125 | 1 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs | 362 |
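A short usage sketch, under the assumption that the wrapper above behaves like transformers' RagTokenizer; left commented out because it downloads a checkpoint (the model name is the standard public RAG checkpoint, shown for illustration):

# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
# batch = tokenizer.prepare_seq2seq_batch(
#     src_texts=["who holds the record in 100m freestyle"],
#     tgt_texts=["michael phelps"],
#     return_tensors="pt",
# )
# batch["input_ids"] comes from the question-encoder tokenizer,
# batch["labels"] from the generator tokenizer.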
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Sum of 2 * a * ((a - 1) // 2) for 3 <= a <= n (the maximum square
    remainder r_max for each a, as in Project Euler problem 120)."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution()) | 153 | 0 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Hubble parameter H(z) from the Friedmann equation."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 315 |
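In standard cosmology notation, the quantity the function above returns is the Friedmann expansion rate, with the curvature density inferred from the other three:

$$H(z) = H_0\sqrt{\Omega_r(1+z)^4 + \Omega_m(1+z)^3 + \Omega_k(1+z)^2 + \Omega_\Lambda},\qquad \Omega_k = 1-\Omega_m-\Omega_r-\Omega_\Lambda$$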
"""simple docstring"""
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        # Gradients of the squared error with respect to each weight matrix,
        # obtained by applying the chain rule backwards through the network.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    # Expects the sigmoid *output* as its argument: s'(x) = s(x) * (1 - s(x)).
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 315 | 1 |
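A quick standalone sanity check (not in the original file) that the analytic derivative used above agrees with a central finite difference of the sigmoid itself:

import numpy

x = numpy.linspace(-3, 3, 7)
s = 1 / (1 + numpy.exp(-x))
eps = 1e-6
numeric = ((1 / (1 + numpy.exp(-(x + eps)))) - (1 / (1 + numpy.exp(-(x - eps))))) / (2 * eps)
analytic = s * (1 - s)  # sigmoid_derivative applied to the sigmoid output
assert numpy.allclose(numeric, analytic, atol=1e-6)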
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 351 |
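A quick arithmetic illustration of the linear learning-rate scaling rule used above, absolute_lr = base_lr * total_batch_size / 256 (all values here are assumed, not taken from the script):

base_learning_rate = 1e-3          # assumed CLI value
per_device_batch_size = 32         # assumed
gradient_accumulation_steps = 2    # assumed
world_size = 4                     # assumed
total_train_batch_size = per_device_batch_size * gradient_accumulation_steps * world_size  # 256
absolute_lr = base_learning_rate * total_train_batch_size / 256
print(absolute_lr)  # 0.001, since the total batch size happens to equal 256 here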
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 159 | 0 |
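Typical use of the context manager above, e.g. around a spinner or redraw loop (a sketch; the cursor is restored even if the body raises):

import time

with hide():
    for _ in range(3):
        print("working...", end="\r", flush=True)
        time.sleep(0.1)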
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
| 154 |
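The time-domain result above can be cross-checked against the FFT identity for circular convolution (pointwise product of the two DFTs); a minimal standalone sketch:

import numpy as np

x = np.array([2, 1, 2, -1], dtype=float)
h = np.array([1, 2, 3, 4], dtype=float)
fft_result = np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(h)))
print(np.round(fft_result, 2))  # should match CircularConvolution().circular_convolution()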
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 154 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 200 |
def count_divisors(n):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
| 200 | 1 |
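count_divisors above implements the standard divisor-count formula from the prime factorization; writing it out:

$$n = \prod_i p_i^{e_i} \;\Longrightarrow\; d(n) = \prod_i (e_i + 1)$$

For example, 28 = 2^2 * 7 gives d(28) = (2+1)(1+1) = 6, matching its six divisors 1, 2, 4, 7, 14, 28.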
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Depth-first (post-order) topological sort over the global graph `edges`."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort) | 233 | 0 |
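A small standalone check (not in the original script) that the ordering returned by this post-order DFS is consistent: every vertex appears after all vertices it points to, so reversing the list gives a conventional topological order.

order = topological_sort("a", [], [])
position = {v: i for i, v in enumerate(order)}
assert all(position[dst] < position[src] for src, dsts in edges.items() for dst in dsts)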
import math


def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
| 365 |
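Both sums above also have closed forms, which would make the whole computation O(1):

$$\sum_{i=1}^{n} i = \frac{n(n+1)}{2},\qquad \sum_{i=1}^{n} i^2 = \frac{n(n+1)(2n+1)}{6}$$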
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 146 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 166 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 166 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 82 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 82 | 1 |
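A property-style check (inputs assumed, not from the original file) that the divide-and-conquer result matches a simple linear scan:

import random

nums = [random.randint(-100, 100) for _ in range(25)]
assert find_max(nums, 0, len(nums) - 1) == max(nums)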
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 66 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 66 | 1 |
'''simple docstring'''
def solution(power: int = 1_000) -> int:
    """Sum of the digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 242 |
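A quick sanity check for the digit-sum solution above (Project Euler 16; 1366 is the published answer for 2**1000):

# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26
assert solution(15) == 26 == sum(int(d) for d in str(2**15))
assert solution(1000) == 1366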
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (in log space) all but the most likely classes whose cumulative probability stays below `truncation_rate`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
| 242 | 1 |
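A toy, self-contained illustration (my sketch, not from the pipeline file) of the guidance step performed in the denoising loop above: the conditional and unconditional log-probabilities are combined in log space, and logsumexp renormalizes each column back to a valid distribution.

import torch

log_p_uncond = torch.log_softmax(torch.randn(1, 8, 4), dim=1)  # log p(x_0) without text
log_p_text = torch.log_softmax(torch.randn(1, 8, 4), dim=1)    # log p(x_0) with text
guidance_scale = 5.0
combined = log_p_uncond + guidance_scale * (log_p_text - log_p_uncond)
combined = combined - torch.logsumexp(combined, dim=1, keepdim=True)  # renormalize over classes
assert torch.allclose(combined.exp().sum(dim=1), torch.ones(1, 4))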
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 108 |
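The _LazyModule indirection above defers the actual import until an attribute is first accessed. A rough sketch of the same idea with plain PEP 562 module-level __getattr__ (an illustration, not the HF implementation):

import importlib

_import_structure = {"tokenization_gpt_sw3": ["GPTSw3Tokenizer"]}

def __getattr__(name):
    for module_name, exports in _import_structure.items():
        if name in exports:
            # import the submodule on first access and hand back the symbol
            module = importlib.import_module("." + module_name, __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")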
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
| 303 | 0 |
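A hypothetical round trip for the watermarker above; WatermarkDecoder and its decode signature are my assumption about the imwatermark API, so treat this as a sketch:

import numpy as np
from imwatermark import WatermarkDecoder  # assumed decoder counterpart of WatermarkEncoder

def read_watermark(bgr_image: np.ndarray) -> str:
    # WATERMARK_MESSAGE above is 48 bits long
    decoder = WatermarkDecoder("bits", 48)
    bits = decoder.decode(bgr_image, "dwtDct")
    return "".join(str(int(b)) for b in bits)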
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the indentation (leading whitespace) of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of lines at the given indentation level, optionally bounded by prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a key function so that underscores are ignored when sorting."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of object names: constants first, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Sort the objects listed inside a single import statement."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of a given init file, rewriting it in place unless `check_only`."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the diffusers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
__A : str = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 27 | 1 |
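A small demonstration of the sorting helpers above (my example, not part of the original script): constants sort first, then classes, then functions, with underscores ignored inside each group.

print(sort_objects(["logger", "CONFIG_NAME", "BertModel", "_helper", "AutoModel"]))
# -> ['CONFIG_NAME', 'AutoModel', 'BertModel', '_helper', 'logger']
print(sort_objects_in_import('_import_structure["models"] = ["zeta", "Alpha", "BETA"]'))
# -> _import_structure["models"] = ["BETA", "Alpha", "zeta"]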
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 182 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__UpperCamelCase : Optional[Any] = ['gpt2']
__UpperCamelCase : str = 'gpt2'
if is_tf_available():
class lowercase__ ( tf.Module):
def __init__( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Dict = tokenizer
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = TFGPTaLMHeadModel.from_config(UpperCamelCase__ )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def __A ( self : str , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenized['''input_ids'''].to_tensor()
SCREAMING_SNAKE_CASE : Any = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
SCREAMING_SNAKE_CASE : List[Any] = self.model(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class lowercase__ ( unittest.TestCase):
def __A ( self : int ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [GPTaTokenizer.from_pretrained(UpperCamelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
SCREAMING_SNAKE_CASE : List[str] = [TFGPTaTokenizer.from_pretrained(UpperCamelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
SCREAMING_SNAKE_CASE : Tuple = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
SCREAMING_SNAKE_CASE : Dict = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def __A ( self : str ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
SCREAMING_SNAKE_CASE : Dict = tokenizer([test_inputs] , return_tensors='''tf''' )
SCREAMING_SNAKE_CASE : Any = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
SCREAMING_SNAKE_CASE : int = python_outputs[key].numpy()
SCREAMING_SNAKE_CASE : int = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase__ , tf.intaa ) == tf_outputs_values ) )
@slow
def __A ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE : Optional[int] = tf.function(UpperCamelCase__ )
for test_inputs in self.test_sentences:
SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = compiled_tokenizer(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = tf_tokenizer(UpperCamelCase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE : str = ModelToSave(tokenizer=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
SCREAMING_SNAKE_CASE : Union[str, Any] = model.serving(UpperCamelCase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE : List[str] = Path(UpperCamelCase__ ) / '''saved.model'''
tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={'''serving_default''': model.serving} )
SCREAMING_SNAKE_CASE : str = tf.saved_model.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = loaded_model.signatures['''serving_default'''](UpperCamelCase__ )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE : List[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
SCREAMING_SNAKE_CASE : Tuple = tf_tokenizer(UpperCamelCase__ ) # Build model with some sample inputs
SCREAMING_SNAKE_CASE : Union[str, Any] = tf_tokenizer.get_config()
SCREAMING_SNAKE_CASE : Optional[Any] = TFGPTaTokenizer.from_config(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = model_from_config(UpperCamelCase__ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
SCREAMING_SNAKE_CASE : Tuple = 12_3123
for max_length in [3, 5, 1024]:
SCREAMING_SNAKE_CASE : Dict = tf.convert_to_tensor([self.test_sentences[0]] )
SCREAMING_SNAKE_CASE : Tuple = tf_tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 182 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 352 |
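For reference, the tanh ("gelu_new"-style) approximation the tests above compare against — a sketch of the standard formula, not code from transformers:

import math

def gelu_tanh(x: float) -> float:
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

assert abs(gelu_tanh(1.0) - 0.8412) < 1e-3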
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """Count the set bits in a non-negative integer."""
    # check the type first so the comparison below cannot fail on non-numerics
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
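Two equivalent ways to count set bits, useful as cross-checks for the routine above: Kernighan's trick clears the lowest set bit per iteration, and Python 3.10+ also ships int.bit_count().

def set_bits_kernighan(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        count += 1
    return count

assert set_bits_kernighan(0b1011) == 3 == bin(0b1011).count("1")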
'''simple docstring'''
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: empirically determined constant, valid values are 0.04 and 0.06
        window_size: size of the neighbourhood considered for corner detection
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with detected corners marked in red, plus the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 75 |
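The corner response computed above is R = det(M) - k * trace(M)**2 for the structure tensor M = [[wxx, wxy], [wxy, wyy]]. A tiny numeric check (my example): strong gradients in both directions give a large positive R, i.e. a corner.

wxx, wyy, wxy, k = 4.0, 4.0, 0.5, 0.04
det = wxx * wyy - wxy**2   # 15.75
trace = wxx + wyy          # 8.0
r = det - k * trace**2     # 15.75 - 2.56 = 13.19
assert r > 0.5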
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_UpperCamelCase : Optional[int] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class snake_case__ ( unittest.TestCase):
@classmethod
def A ( cls : Optional[int] ) -> Tuple:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(_A )
@classmethod
def A ( cls : int ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : List[str] = FlaxBertModel(_A )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def __UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : Optional[int] = flatten_dict(modela.params )
UpperCAmelCase_ : str = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
UpperCAmelCase_ : int = False
return models_are_equal
@require_flax
class snake_case__ ( unittest.TestCase):
def A ( self : Any ) -> Any:
UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Any = FlaxBertModel(_A )
UpperCAmelCase_ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) )
with self.assertRaises(_A ):
UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Tuple:
UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Tuple = FlaxBertModel(_A )
UpperCAmelCase_ : str = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' )
with self.assertRaises(_A ):
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Optional[int]:
UpperCAmelCase_ : int = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
| 304 | 0 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain `n` digits (Project Euler 25)."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 270 |
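The loop above can be cross-checked with Binet's formula: F(i) ≈ phi**i / sqrt(5), so the first term with n digits has index roughly (n - 1 + log10(sqrt(5))) / log10(phi). A sketch:

import math

def solution_closed_form(n: int = 1000) -> int:
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(math.sqrt(5))) / math.log10(phi))

assert solution_closed_form(3) == 12  # F(12) = 144 is the first 3-digit term
assert solution_closed_form(1000) == 4782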
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and tombstone deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 259 |
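A minimal exercise of the open-addressing map above, including one grow cycle (my example):

hm = HashMap(initial_block_size=4)
for i in range(10):
    hm[str(i)] = i  # crossing the 0.75 load factor triggers _size_up()
assert len(hm) == 10 and hm["7"] == 7
del hm["7"]
assert "7" not in hm and len(hm) == 9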
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of the first `n` naturals."""
    sum_cubes = (n * (n + 1) // 2) ** 2  # equals the square of the sum (and the sum of cubes)
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 224 | 0 |
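A brute-force check of the closed forms used above (for n = 10 the difference is 3025 - 385 = 2640):

n = 10
assert sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1)) == solution(n) == 2640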
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """Render a benchmark-results JSON file as a collapsible Markdown report."""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 357 |
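A hypothetical invocation of the formatter above on a tiny benchmark dump (file names are made up for the example):

import json, os, tempfile

payload = {"benchmark/read": {"time_s": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
with tempfile.TemporaryDirectory() as tmp:
    src, dst = os.path.join(tmp, "results.json"), os.path.join(tmp, "results.md")
    with open(src, "w", encoding="utf-8") as f:
        json.dump(payload, f)
    format_json_to_md(src, dst)
    print(open(dst, encoding="utf-8").read())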
import sys
from collections import defaultdict
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = []
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.node_position[vertex]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = pos
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCamelCase__ )
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = position[index]
while index != 0:
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , lowerCamelCase__ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , lowerCamelCase__ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , 0 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1
for i in range(lowerCamelCase__ , -1 , -1 ):
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
return temp
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase = []
for vertex in range(len(UpperCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase__ )
heap.node_position.append(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
__lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 348 | 0 |
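The custom Heap above implements Prim's algorithm with decrease-key; for comparison, a compact lazy-deletion version using the standard library's heapq (my sketch, same adjacency_list format of [neighbor, weight] pairs):

import heapq

def prim_mst(adjacency_list):
    visited = {0}
    edges = [(w, 0, v) for v, w in adjacency_list[0]]
    heapq.heapify(edges)
    tree = []
    while edges and len(visited) < len(adjacency_list):
        w, u, v = heapq.heappop(edges)
        if v not in visited:
            visited.add(v)
            tree.append((u, v))
            for nxt, nw in adjacency_list[v]:
                if nxt not in visited:
                    heapq.heappush(edges, (nw, v, nxt))
    return tree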
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 0 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCamelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 100 | 0 |
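The contract these tests pin down, as a standalone sketch: max_num_jobs workers receive near-equal contiguous ranges of num_shards shards, with the first jobs absorbing the remainder.

def distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    per_job = [num_shards // max_num_jobs + (i < num_shards % max_num_jobs) for i in range(max_num_jobs)]
    starts = [sum(per_job[:i]) for i in range(max_num_jobs)]
    return [range(s, s + n) for s, n in zip(starts, per_job) if n]

assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(1, 10) == [range(0, 1)]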
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False  # assumed: the two trailing False flags map to these mixin switches
    test_cpu_offload = False
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Optional[int] = UNetaDModel(
block_out_channels=(32, 32, 64) ,
extra_in_channels=16 ,
sample_size=512 ,
sample_rate=1_6000 ,
in_channels=2 ,
out_channels=2 ,
flip_sin_to_cos=False ,
use_timestep_embedding=False ,
time_embedding_type="""fourier""" ,
mid_block_type="""UNetMidBlock1D""" ,
down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") ,
up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
_A : int = IPNDMScheduler()
_A : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def a__ ( self , device , seed=0 ) -> Optional[int]:
    if str(device ).startswith("""mps""" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def a__ ( self ) -> List[Any]:
_A : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : int = self.get_dummy_components()
_A : str = DanceDiffusionPipeline(**_a )
_A : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[str] = self.get_dummy_inputs(_a )
_A : Tuple = pipe(**_a )
_A : Optional[Any] = output.audios
_A : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_A : List[Any] = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def a__ ( self ) -> List[str]:
return super().test_save_load_local()
@skip_mps
def a__ ( self ) -> Union[str, Any]:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def a__ ( self ) -> Union[str, Any]:
return super().test_save_load_optional_components()
@skip_mps
def a__ ( self ) -> Optional[Any]:
return super().test_attention_slicing_forward_pass()
def a__ ( self ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> Dict:
_A : Optional[int] = torch_device
_A : Optional[Any] = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
_A : Tuple = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = torch.manual_seed(0 )
_A : Union[str, Any] = pipe(generator=_a , num_inference_steps=100 , audio_length_in_s=4.096 )
_A : Optional[Any] = output.audios
_A : Optional[int] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_A : List[Any] = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self ) -> Optional[Any]:
_A : int = torch_device
_A : Tuple = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
_A : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = torch.manual_seed(0 )
_A : Optional[int] = pipe(generator=_a , num_inference_steps=100 , audio_length_in_s=4.096 )
_A : Tuple = output.audios
_A : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_A : Dict = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
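# Why get_dummy_inputs branches on "mps" above: torch.Generator("mps") was not
# supported on older torch builds, so tests fall back to the global CPU
# generator there. A standalone sketch of that pattern (not diffusers API):
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default (CPU) generator
    return torch.Generator(device=device).manual_seed(seed)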
| 343 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput( BaseOutput ):
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer( ModelMixin,ConfigMixin ):
@register_to_config
def __init__( self , num_attention_heads = 32 , attention_head_dim = 64 , num_layers = 20 , embedding_dim = 768 , num_embeddings=77 , additional_embeddings=4 , dropout = 0.0 , time_embed_act_fn = "silu" , norm_in_type = None , embedding_proj_norm_type = None , encoder_hid_proj_type = "linear" , added_emb_type = "prd" , time_embed_dim = None , embedding_proj_dim = None , clip_embed_dim = None , ) -> Any:
super().__init__()
_A : int = num_attention_heads
_A : Union[str, Any] = attention_head_dim
_A : Tuple = num_attention_heads * attention_head_dim
_A : Any = additional_embeddings
_A : Any = time_embed_dim or inner_dim
_A : List[str] = embedding_proj_dim or embedding_dim
_A : Optional[int] = clip_embed_dim or embedding_dim
_A : Union[str, Any] = Timesteps(_a , _a , 0 )
_A : str = TimestepEmbedding(_a , _a , out_dim=_a , act_fn=_a )
_A : Dict = nn.Linear(_a , _a )
if embedding_proj_norm_type is None:
_A : int = None
elif embedding_proj_norm_type == "layer":
_A : Optional[Any] = nn.LayerNorm(_a )
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
_A : Optional[Any] = nn.Linear(_a , _a )
if encoder_hid_proj_type is None:
_A : Union[str, Any] = None
elif encoder_hid_proj_type == "linear":
_A : Tuple = nn.Linear(_a , _a )
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
_A : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _a ) )
if added_emb_type == "prd":
_A : str = nn.Parameter(torch.zeros(1 , 1 , _a ) )
elif added_emb_type is None:
_A : Union[str, Any] = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
_A : int = nn.ModuleList(
[
BasicTransformerBlock(
_a , _a , _a , dropout=_a , activation_fn="""gelu""" , attention_bias=_a , )
for d in range(_a )
] )
if norm_in_type == "layer":
_A : Union[str, Any] = nn.LayerNorm(_a )
elif norm_in_type is None:
_A : Tuple = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
_A : int = nn.LayerNorm(_a )
_A : str = nn.Linear(_a , _a )
_A : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
_A : Optional[int] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , _a , persistent=_a )
_A : Tuple = nn.Parameter(torch.zeros(1 , _a ) )
_A : Dict = nn.Parameter(torch.zeros(1 , _a ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> Dict[str, AttentionProcessor]:
_A : List[str] = {}
def fn_recursive_add_processors(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
_A : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , _a , _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a , _a , _a )
return processors
def a__ ( self , _a ) -> List[str]:
_A : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_a , _a ) and len(_a ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_a )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
if not isinstance(_a , _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _a , _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a , _a , _a )
def a__ ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
def a__ ( self , hidden_states , timestep , proj_embedding , encoder_hidden_states = None , attention_mask = None , return_dict = True , ) -> Optional[Any]:
_A : Tuple = hidden_states.shape[0]
_A : List[Any] = timestep
if not torch.is_tensor(_a ):
_A : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0:
_A : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_A : Optional[int] = timesteps * torch.ones(_a , dtype=timesteps.dtype , device=timesteps.device )
_A : Dict = self.time_proj(_a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_A : Tuple = timesteps_projected.to(dtype=self.dtype )
_A : List[Any] = self.time_embedding(_a )
if self.embedding_proj_norm is not None:
_A : Dict = self.embedding_proj_norm(_a )
_A : List[Any] = self.embedding_proj(_a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_A : List[Any] = self.encoder_hidden_states_proj(_a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
_A : Optional[int] = self.proj_in(_a )
_A : Optional[int] = self.positional_embedding.to(hidden_states.dtype )
_A : Union[str, Any] = []
_A : List[str] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_A : List[str] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_A : List[str] = hidden_states[:, None, :]
_A : Dict = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_A : Optional[int] = self.prd_embedding.to(hidden_states.dtype ).expand(_a , -1 , -1 )
additional_embeds.append(_a )
_A : str = torch.cat(
_a , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
_A : Dict = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_A : Union[str, Any] = F.pad(
_a , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_A : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_A : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
_A : List[Any] = F.pad(_a , (0, self.additional_embeddings) , value=0.0 )
_A : Optional[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_A : int = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_A : str = self.norm_in(_a )
for block in self.transformer_blocks:
_A : List[Any] = block(_a , attention_mask=_a )
_A : Any = self.norm_out(_a )
if self.prd_embedding is not None:
_A : int = hidden_states[:, -1]
else:
_A : Any = hidden_states[:, additional_embeddings_len:]
_A : Union[str, Any] = self.proj_to_clip_embeddings(_a )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_a )
def a__ ( self , prior_latents ) -> Tuple:
    prior_latents = (prior_latents * self.clip_std) + self.clip_mean
    return prior_latents
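# Standalone illustration of the causal-mask construction used in __init__
# above: fill a square matrix with -1e4, keep only the strict upper triangle,
# then add a batch axis; adding this to attention scores blocks attention to
# future tokens. Values here are hypothetical.
import torch

num_tokens = 5  # hypothetical sequence length
causal_mask = torch.full([num_tokens, num_tokens], -10000.0)
causal_mask.triu_(1)                  # -1e4 strictly above the diagonal, 0 elsewhere
causal_mask = causal_mask[None, ...]  # shape (1, num_tokens, num_tokens)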
| 343 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def snake_case ( self , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(_snake_case ) )
_lowerCAmelCase = np.random.RandomState(_snake_case )
_lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase = np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase = np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_snake_case )
# warmup pass to apply optimizations
_lowerCAmelCase = pipe(**self.get_dummy_inputs() )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase = np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase = np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
@property
def snake_case ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = ort.SessionOptions()
_lowerCAmelCase = False
return options
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowerCAmelCase = init_image.resize((768, 512) )
# using the PNDM scheduler by default
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = """A fantasy landscape, trending on artstation"""
_lowerCAmelCase = np.random.RandomState(0 )
_lowerCAmelCase = pipe(
prompt=_snake_case , image=_snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type="""np""" , )
_lowerCAmelCase = output.images
_lowerCAmelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_lowerCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowerCAmelCase = init_image.resize((768, 512) )
_lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = """A fantasy landscape, trending on artstation"""
_lowerCAmelCase = np.random.RandomState(0 )
_lowerCAmelCase = pipe(
prompt=_snake_case , image=_snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=_snake_case , output_type="""np""" , )
_lowerCAmelCase = output.images
_lowerCAmelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_lowerCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 82 |
from math import isqrt, log2
def calculate_prime_numbers( max_number ):
    """simple docstring"""
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution( base = 80_08_00 , degree = 80_08_00 ):
    """simple docstring"""
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
| 82 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
_SCREAMING_SNAKE_CASE = 3
def primitive_root( p_val ):
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key( key_size ):
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files( name : str , key_size : int ):
if os.path.exists(f"{name}_pubkey.txt" ) or os.path.exists(f"{name}_privkey.txt" ):
print('''\nWARNING:''' )
print(
f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
public_key , private_key = generate_key(key_size )
print(f"\nWriting public key to file {name}_pubkey.txt..." )
with open(f"{name}_pubkey.txt" , '''w''' ) as fo:
fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}" )
print(f"Writing private key to file {name}_privkey.txt..." )
with open(f"{name}_privkey.txt" , '''w''' ) as fo:
fo.write(f"{private_key[0]},{private_key[1]}" )
def main():
print('''Making key files...''' )
make_key_files('''elgamal''' , 2_0_4_8 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
| 217 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
def __init__(self ,do_resize = True ,size = None ,resample = PILImageResampling.BILINEAR ,do_center_crop = True ,crop_size = None ,do_rescale = True ,rescale_factor = 1 / 255 ,do_normalize = True ,image_mean = None ,image_std = None ,**kwargs ,) -> None:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowercase = size if size is not None else {'''shortest_edge''': 256}
__lowercase = get_size_dict(_lowerCamelCase ,default_to_square=_lowerCamelCase )
__lowercase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__lowercase = get_size_dict(_lowerCamelCase ,param_name='''crop_size''' )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCAmelCase (self ,image ,size ,resample = PILImageResampling.BICUBIC ,data_format = None ,**kwargs ,) -> np.ndarray:
'''simple docstring'''
__lowercase = get_size_dict(_lowerCamelCase ,default_to_square=_lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__lowercase = get_resize_output_image_size(_lowerCamelCase ,size=size['''shortest_edge'''] ,default_to_square=_lowerCamelCase )
return resize(_lowerCamelCase ,size=_lowerCamelCase ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,image ,size ,data_format = None ,**kwargs ,) -> np.ndarray:
'''simple docstring'''
__lowercase = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(_lowerCamelCase ,size=(size['''height'''], size['''width''']) ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,image ,scale ,data_format = None ,**kwargs ) -> np.ndarray:
'''simple docstring'''
return rescale(_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,image ,mean ,std ,data_format = None ,**kwargs ,) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCamelCase ,mean=_lowerCamelCase ,std=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,images ,do_resize = None ,size = None ,resample = None ,do_center_crop = None ,crop_size = None ,do_rescale = None ,rescale_factor = None ,do_normalize = None ,image_mean = None ,image_std = None ,return_tensors = None ,data_format = ChannelDimension.FIRST ,**kwargs ,) -> Any:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_lowerCamelCase ,default_to_square=_lowerCamelCase )
__lowercase = resample if resample is not None else self.resample
__lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase = crop_size if crop_size is not None else self.crop_size
__lowercase = get_size_dict(_lowerCamelCase ,param_name='''crop_size''' )
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_lowerCamelCase ,size=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_center_crop:
__lowercase = [self.center_crop(image=_lowerCamelCase ,size=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_lowerCamelCase ,mean=_lowerCamelCase ,std=_lowerCamelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
def _UpperCAmelCase (self ,outputs ,target_sizes = None ) -> str:
'''simple docstring'''
__lowercase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowerCamelCase ):
__lowercase = target_sizes.numpy()
__lowercase = []
for idx in range(len(_lowerCamelCase ) ):
__lowercase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='''bilinear''' ,align_corners=_lowerCamelCase )
__lowercase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowerCamelCase )
else:
__lowercase = logits.argmax(dim=1 )
__lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 217 | 1 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences( sequence ) -> None:
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree( sequence , current_subsequence , index ) -> None:
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
A_ :Dict = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
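# With the names restored above, a quick run shows the traversal order: the
# recursion explores "skip element" before "take element".
generate_all_subsequences([1, 2])
# prints: [] then [2] then [1] then [1, 2]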
| 71 |
from __future__ import annotations
def UpperCAmelCase__ ( stress : float , tangential_force : float , area : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
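# Worked example of the zero-sentinel convention above: pass 0 for exactly the
# quantity to solve for (the numbers here are hypothetical).
print(UpperCAmelCase__(25 , 100 , 0 ))   # ('area', 4.0)   since area = F / stress
print(UpperCAmelCase__(0 , 1600 , 200 )) # ('stress', 8.0) since stress = F / area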
| 188 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__ ( PretrainedConfig ):
    model_type = """timesformer"""
def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ):
    super().__init__(**kwargs )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = num_frames
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = qkv_bias
_snake_case = attention_type
_snake_case = drop_path_rate
| 350 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase__ = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase__ = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
UpperCAmelCase__ = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class lowerCAmelCase__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
    super().__init__(
        vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
    normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
    if (
        normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
        or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
        or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
    ):
        normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
        normalizer_state['''lowercase'''] = do_lower_case
        normalizer_state['''strip_accents'''] = strip_accents
        normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
        self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
    self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    if token_ids_1:
        output += token_ids_1 + [self.sep_token_id]
    return output
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep ) * [0]
    return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary( self , save_directory , filename_prefix = None ):
    files = self._tokenizer.model.save(save_directory , name=filename_prefix )
    return tuple(files )
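# Packing produced by the two overrides above for a BERT sequence pair
# ([CLS] A [SEP] B [SEP]); 101/102 are the stock BERT special-token ids and
# 7, 8, 9 are hypothetical wordpiece ids.
ids_a, ids_b = [7, 8], [9]
input_ids = [101] + ids_a + [102] + ids_b + [102]
token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
assert len(input_ids) == len(token_type_ids) == 6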
| 40 | 0 |
'''simple docstring'''
from typing import Any
def viterbi( observations_space : list , states_space : list , initial_probabilities : dict , transition_probabilities : dict , emission_probabilities : dict , ):
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities : dict = {}
    pointers : dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('There\'s an empty parameter' )
def _validate_lists( observations_space , states_space ):
    _validate_list(observations_space , 'observations_space' )
    _validate_list(states_space , 'states_space' )
def _validate_list( _object , var_name ):
    if not isinstance(_object , list ):
        msg = F"""{var_name} must be a list"""
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = F"""{var_name} must be a list of strings"""
                raise ValueError(msg )
def _validate_dicts( initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validate_dict(initial_probabilities , 'initial_probabilities' , float )
    _validate_nested_dict(transition_probabilities , 'transition_probabilities' )
    _validate_nested_dict(emission_probabilities , 'emission_probabilities' )
def _validate_nested_dict( _object , var_name ):
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict( _object , var_name , value_type , nested = False ):
    if not isinstance(_object , dict ):
        msg = F"""{var_name} must be a dict"""
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = F"""{var_name} all keys must be strings"""
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = 'nested dictionary ' if nested else ''
        msg = F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
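# Classic illustration exercising viterbi() as reconstructed above; the
# probabilities are the textbook healthy/fever example, not part of the source.
observations = ['normal', 'cold', 'dizzy']
states = ['Healthy', 'Fever']
initial = {'Healthy': 0.6, 'Fever': 0.4}
transition = {
    'Healthy': {'Healthy': 0.7, 'Fever': 0.3},
    'Fever': {'Healthy': 0.4, 'Fever': 0.6},
}
emission = {
    'Healthy': {'normal': 0.5, 'cold': 0.4, 'dizzy': 0.1},
    'Fever': {'normal': 0.1, 'cold': 0.3, 'dizzy': 0.6},
}
print(viterbi(observations, states, initial, transition, emission))
# -> ['Healthy', 'Healthy', 'Fever']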
| 27 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file( tmp_path ):
    filename = tmp_path / 'file.csv'
    data = textwrap.dedent(
        '\\n header1,header2\n 1,2\n 10,20\n ' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file( tmp_path ):
    filename = tmp_path / 'malformed_file.csv'
    data = textwrap.dedent(
        '\\n header1,header2\n 1,2\n 10,20,\n ' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image( tmp_path , image_file ):
    filename = tmp_path / 'csv_with_image.csv'
    data = textwrap.dedent(
        F"""\
    image
    {image_file}
    """ )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label( tmp_path ):
    filename = tmp_path / 'csv_with_label.csv'
    data = textwrap.dedent(
        '\\n label\n good\n bad\n good\n ' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list( tmp_path ):
    filename = tmp_path / 'csv_with_int_list.csv'
    data = textwrap.dedent(
        '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv( csv_file , malformed_csv_file , caplog ):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='Error tokenizing data' ):
        for _ in generator:
            pass
    assert any(
        record.levelname == 'ERROR'
        and 'Failed to read file' in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image( csv_file_with_image ):
    with open(csv_file_with_image , encoding='utf-8' ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('image' ).type == Image()()
    generated_content = pa_table.to_pydict()['image']
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label( csv_file_with_label ):
    with open(csv_file_with_label , encoding='utf-8' ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
    generated_content = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def test_csv_convert_int_list( csv_file_with_int_list ):
    csv = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x : [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
    generated_content = pa_table.to_pydict()['int_list']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 27 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload , sampling_rate ) -> np.array:
'''simple docstring'''
UpperCamelCase = f'''{sampling_rate}'''
UpperCamelCase = '1'
UpperCamelCase = 'f32le'
UpperCamelCase = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(lowercase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCamelCase = ffmpeg_process.communicate(lowercase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
UpperCamelCase = output_stream[0]
UpperCamelCase = np.frombuffer(lowercase , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def A ( lowercase , lowercase , lowercase = "f32le" , ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = f'''{sampling_rate}'''
UpperCamelCase = '1'
if format_for_conversion == "s16le":
UpperCamelCase = 2
elif format_for_conversion == "f32le":
UpperCamelCase = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCamelCase = platform.system()
if system == "Linux":
UpperCamelCase = 'alsa'
UpperCamelCase = 'default'
elif system == "Darwin":
UpperCamelCase = 'avfoundation'
UpperCamelCase = ':0'
elif system == "Windows":
UpperCamelCase = 'dshow'
UpperCamelCase = 'default'
UpperCamelCase = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
UpperCamelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCamelCase = _ffmpeg_stream(lowercase , lowercase )
for item in iterator:
yield item
def ffmpeg_microphone_live( sampling_rate , chunk_length_s , stream_chunk_s = None , stride_length_s = None , format_for_conversion = "f32le" , ) -> int:
'''simple docstring'''
if stream_chunk_s is not None:
UpperCamelCase = stream_chunk_s
else:
UpperCamelCase = chunk_length_s
UpperCamelCase = ffmpeg_microphone(lowercase , lowercase , format_for_conversion=lowercase )
if format_for_conversion == "s16le":
UpperCamelCase = np.intaa
UpperCamelCase = 2
elif format_for_conversion == "f32le":
UpperCamelCase = np.floataa
UpperCamelCase = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
UpperCamelCase = chunk_length_s / 6
UpperCamelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowercase , (int, float) ):
UpperCamelCase = [stride_length_s, stride_length_s]
UpperCamelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCamelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCamelCase = datetime.datetime.now()
UpperCamelCase = datetime.timedelta(seconds=lowercase )
for item in chunk_bytes_iter(lowercase , lowercase , stride=(stride_left, stride_right) , stream=lowercase ):
# Put everything back in numpy scale
UpperCamelCase = np.frombuffer(item['raw'] , dtype=lowercase )
UpperCamelCase = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
UpperCamelCase = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter( iterator , chunk_len , stride , stream = False ) -> str:
'''simple docstring'''
UpperCamelCase = B''
UpperCamelCase , UpperCamelCase = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
UpperCamelCase = 0
for raw in iterator:
acc += raw
if stream and len(lowercase ) < chunk_len:
UpperCamelCase = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowercase ) >= chunk_len:
# We are flushing the accumulator
UpperCamelCase = (_stride_left, stride_right)
UpperCamelCase = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
UpperCamelCase = False
yield item
UpperCamelCase = stride_left
UpperCamelCase = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowercase ) > stride_left:
UpperCamelCase = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
UpperCamelCase = False
yield item
def _ffmpeg_stream( ffmpeg_command , buflen ) -> str:
    '''simple docstring'''
    bufsize = 2**24 # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
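# Hedged usage sketch for ffmpeg_read as renamed above. It needs the ffmpeg
# binary on PATH, and "sample.mp3" is a hypothetical file:
with open('sample.mp3', 'rb') as f:
    audio = ffmpeg_read(f.read(), sampling_rate=16000)
print(audio.dtype, audio.shape)  # float32 mono PCM decoded at 16 kHz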
| 110 |
from __future__ import annotations
def extended_euclid( a , b ) -> tuple[int, int]:
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem( n_1 , r_1 , n_2 , r_2 ) -> int:
    '''simple docstring'''
    (x, y) = extended_euclid(n_1 , n_2 )
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m
def invert_modulo( a , n ) -> int:
    '''simple docstring'''
    (b, x) = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2( n_1 , r_1 , n_2 , r_2 ) -> int:
    '''simple docstring'''
    x, y = invert_modulo(n_1 , n_2 ), invert_modulo(n_2 , n_1 )
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 110 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_snake_case = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_use_cache_forward( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large" , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
    @slow
    def test_pegasus_xsum_summary( self ):
        """simple docstring"""
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text , return_tensors="np" , truncation=True , max_length=512 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
        assert tgt_text == decoded
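    # The two cache checks above follow one pattern, sketched here for reference
    # (illustrative pseudo-usage added for clarity, not an additional test): decode
    # step by step with `init_cache`, then confirm the last-token logits match a
    # full, cache-free decode.
    #
    #   cache = model.init_cache(batch_size, max_len, encoder_outputs)
    #   step = model.decode(tokens[:, :-1], encoder_outputs, past_key_values=cache, ...)
    #   last = model.decode(tokens[:, -1:], encoder_outputs, past_key_values=step.past_key_values, ...)
    #   full = model.decode(tokens, encoder_outputs)
    #   np.max(np.abs(last[0][:, -1] - full[0][:, -1]))  # should be ~0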
| 250 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively( hf_model , key , value , full_name , weight_type ):
    hf_pointer = hf_model
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore( name , ignore_keys ):
    for key in ignore_keys:
        if key.endswith(".*" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split(".*." )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights( fairseq_dict , hf_model , task ):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F'''{name} was ignored''' )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split(".*." )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
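# Illustrative sketch (added; not part of the conversion script): how a wildcard
# MAPPING entry is resolved. For a fairseq name such as "encoder.layers.3.fc1",
# the key "encoder.layers.*.fc1" reduces to its suffix "fc1", the layer index is
# recovered from the name, and "*" in the mapped key is replaced with it:
#
#   name = "encoder.layers.3.fc1"
#   key, mapped_key = "fc1", "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense"
#   layer_index = name.split(key)[0].split(".")[-2]   # -> "3"
#   mapped_key.replace("*", layer_index)              # -> "...wrapped_encoder.layers.3.feed_forward..."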
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint( task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 18_76
        config.max_text_positions = 6_00
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 18_76
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(F'''Unknown task name: {task}''' )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint["model"] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 250 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A =logging.get_logger(__name__)
__A ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = '''swin'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(depths) + 1)]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
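# Brief illustrative usage of the config above (added for clarity; the values are
# simply the defaults): the channel dimension doubles at each of the four stages.
#
#   config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
#   config.hidden_size   # 96 * 2**3 == 768, the channel dim after the last stage
#   config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']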
class SwinOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ])
    @property
    def atol_for_validation( self ):
        return 1e-4
| 283 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BridgeTowerImageProcessor'''
    tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
    def __init__( self , image_processor , tokenizer):
        super().__init__(image_processor , tokenizer)
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode( self , *args , **kwargs):
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        return self.tokenizer.decode(*args , **kwargs)
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 283 | 1 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig( PretrainedConfig ):
    model_type = """detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=1_00 , encoder_layers=6 , encoder_ffn_dim=20_48 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=20_48 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            backbone , use_pretrained_backbone , dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        return self.d_model
    @classmethod
    def from_backbone_config( cls , backbone_config: PretrainedConfig , **kwargs ):
        return cls(backbone_config=backbone_config , **kwargs )
    def to_dict( self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-5
    @property
    def default_onnx_opset( self ) -> int:
        return 12
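# Illustrative usage (added for clarity; not from the original file): the derived
# properties simply alias fields renamed by `attribute_map`.
#
#   config = DetrConfig()
#   config.hidden_size            # 256, same as config.d_model
#   config.num_attention_heads    # 8, same as config.encoder_attention_heads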
| 270 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position( position: int ) -> int:
    return (position - 1) // 2
def get_child_left_position( position: int ) -> int:
    return (2 * position) + 1
def get_child_right_position( position: int ) -> int:
    return (2 * position) + 2
class MinPriorityQueue( Generic[T] ):
    def __init__( self ) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
    def __len__( self ) -> int:
        return self.elements
    def __repr__( self ) -> str:
        return str(self.heap )
    def is_empty( self ) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push( self , elem: T , weight: int ) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )
    def extract_min( self ) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        elem , _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem , _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem
    def update_key( self , elem: T , weight: int ) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _ , parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )
    def _bubble_up( self , elem: T ) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _ , weight = self.heap[curr_pos]
        _ , parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position , curr_pos )
            return self._bubble_up(elem )
        return None
    def _bubble_down( self , elem: T ) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _ , weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos )
                return self._bubble_down(elem )
            else:
                return None
        if child_right_position < self.elements:
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        return None
    def _swap_nodes( self , node1_pos: int , node2_pos: int ) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos] , self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted( Generic[T] ):
    def __init__( self ) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0
    def __repr__( self ) -> str:
        return str(self.connections )
    def __len__( self ) -> int:
        return self.nodes
    def add_node( self , node: T ) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge( self , node1: T , node2: T , weight: int ) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo( graph: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
                parent[neighbour] = node
    return dist, parent
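# A small illustrative run (added; the graph is made up): build a weighted
# triangle and compute its minimum spanning tree with Prim's algorithm. The MST
# keeps edges a-b (3) and c-a (5) and drops b-c (10); `parent` maps each node to
# its MST predecessor.
if __name__ == "__main__":
    graph = GraphUndirectedWeighted[str]()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("c", "a", 5)
    dist, parent = prims_algo(graph)
    print(dist, parent)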
| 270 | 1 |
"""simple docstring"""
def binary_insertion_sort( collection: list ) -> list:
    """simple docstring"""
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
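    # Added sanity check (illustrative): the binary search finds each insert
    # position in O(log i) comparisons, though element shifting keeps the whole
    # sort O(n^2) in the worst case.
    assert binary_insertion_sort([5, 2, 4, 1]) == [1, 2, 4, 5]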
| 350 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 289 | 0 |
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.0_2, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        """simple docstring"""
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_mobilebert_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        model = MobileBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        result = model(input_ids, token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        model = MobileBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        model = MobileBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        model = MobileBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
    def create_and_check_mobilebert_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        model = MobileBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def create_and_check_mobilebert_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16_compatible = True
    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor( __snake_case : Any ) -> Any:
"""simple docstring"""
return torch.tensor(
__snake_case , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(_lowerCamelCase )
lowerCamelCase_ =_long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
with torch.no_grad():
lowerCamelCase_ =model(_lowerCamelCase )[0]
lowerCamelCase_ =torch.Size((1, 9, 512) )
self.assertEqual(output.shape, _lowerCamelCase )
lowerCamelCase_ =torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=_lowerCamelCase, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase_ =torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase_ =torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
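The bounds check above compares expected and observed logits by ratio rather than by absolute difference, because MobileBERT logits span roughly eight orders of magnitude. A self-contained sketch of the same technique (the tensor values are illustrative, and the ratio test assumes the reference values are nonzero):

import torch

TOLERANCE = 1e-3

def within_relative_bounds(expected: torch.Tensor, actual: torch.Tensor) -> bool:
    # For values near 1e8 an absolute epsilon would have to be huge,
    # but expected / actual should always stay close to 1.
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE))

assert within_relative_bounds(torch.tensor([1e8, 2.5]), torch.tensor([1.00001e8, 2.5001]))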
| 75 |
"""simple docstring"""
def perfect( number : int ) -> bool:
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_UpperCamelCase : Tuple = int(input('Enter number: ').strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
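The divisor sum above scans every integer up to number // 2, which is O(n). A common O(sqrt(n)) variant pairs each small divisor d with number // d; a sketch, equivalent for inputs greater than 1:

import math

def perfect_fast(number: int) -> bool:
    if number < 2:
        return False
    total = 1  # 1 is a proper divisor of every number > 1
    for d in range(2, math.isqrt(number) + 1):
        if number % d == 0:
            total += d
            if d != number // d:  # avoid double-counting a square-root divisor
                total += number // d
    return total == number

assert perfect_fast(28) and perfect_fast(496) and not perfect_fast(27)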
| 220 | 0 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase_ )
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , *UpperCamelCase : str , **UpperCamelCase : Tuple ):
'''simple docstring'''
super().__init__(*UpperCamelCase , **UpperCamelCase )
requires_backends(self , 'decord' )
self.check_model_type(UpperCamelCase )
def UpperCamelCase_ ( self : Any , UpperCamelCase : Any=None , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Optional[int]=None ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
if frame_sampling_rate is not None:
_snake_case : List[str] = frame_sampling_rate
if num_frames is not None:
_snake_case : int = num_frames
_snake_case : Any = {}
if top_k is not None:
_snake_case : Tuple = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[Any] , UpperCamelCase : Union[str, List[str]] , **UpperCamelCase : int ):
'''simple docstring'''
return super().__call__(UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Any=1 ):
'''simple docstring'''
if num_frames is None:
_snake_case : Union[str, Any] = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
_snake_case : Union[str, Any] = BytesIO(requests.get(UpperCamelCase ).content )
_snake_case : List[str] = VideoReader(UpperCamelCase )
videoreader.seek(0 )
_snake_case : str = 0
_snake_case : Dict = num_frames * frame_sampling_rate - 1
_snake_case : Optional[int] = np.linspace(UpperCamelCase , UpperCamelCase , num=UpperCamelCase , dtype=np.intaa )
_snake_case : int = videoreader.get_batch(UpperCamelCase ).asnumpy()
_snake_case : Union[str, Any] = list(UpperCamelCase )
_snake_case : List[Any] = self.image_processor(UpperCamelCase , return_tensors=self.framework )
return model_inputs
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model(**UpperCamelCase )
return model_outputs
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Dict , UpperCamelCase : int=5 ):
'''simple docstring'''
if top_k > self.model.config.num_labels:
_snake_case : str = self.model.config.num_labels
if self.framework == "pt":
_snake_case : Optional[int] = model_outputs.logits.softmax(-1 )[0]
_snake_case : Any = probs.topk(UpperCamelCase )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
_snake_case : int = scores.tolist()
_snake_case : Optional[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase , UpperCamelCase )]
| 366 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""nielsr/canine-s""": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
lowerCAmelCase_ = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
lowerCAmelCase_ = 0
lowerCAmelCase_ = 0xE_000
lowerCAmelCase_ = 0xE_001
lowerCAmelCase_ = 0xE_002
lowerCAmelCase_ = 0xE_003
lowerCAmelCase_ = 0xE_004
# Maps special codepoints to human-readable names.
lowerCAmelCase_ = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
lowerCAmelCase_ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=20_48 , **kwargs ):
'''simple docstring'''
bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
super().__init__(
bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
# Creates a mapping for looking up the IDs of special symbols.
_snake_case : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
_snake_case : Tuple = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
_snake_case : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
_snake_case : str = UNICODE_VOCAB_SIZE
_snake_case : Optional[Any] = len(self._special_codepoints )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return self._unicode_vocab_size
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : str ):
'''simple docstring'''
return list(UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : str ):
'''simple docstring'''
try:
return ord(UpperCamelCase )
except TypeError:
raise ValueError(f"""invalid token: '{token}'""" )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : int ):
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCamelCase )
except TypeError:
raise ValueError(f"""invalid id: {index}""" )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
return "".join(UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = [self.sep_token_id]
_snake_case : int = [self.cls_token_id]
_snake_case : Any = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
_snake_case : int = [1] + ([0] * len(UpperCamelCase )) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCamelCase )) + [1]
return result
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : List[Any] = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
_snake_case : Tuple = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
return ()
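Since CANINE operates directly on Unicode codepoints, tokenization reduces to ord()/chr() plus the Private Use Area codepoints defined above; a self-contained sketch of the round trip:

text = "héllo"
ids = [ord(ch) for ch in text]            # mirrors the token-to-id conversion above
restored = "".join(chr(i) for i in ids)   # mirrors id-to-token plus the join step
assert restored == text
assert all(i < 1_114_112 for i in ids)    # every id fits in the Unicode vocab size
# The special symbols (CLS, SEP, ...) live in the 0xE000-0xE004 Private Use range,
# so they will not collide with ids produced by ord() on ordinary text.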
| 260 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = DanceDiffusionPipeline
__lowerCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__lowerCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__lowerCAmelCase = False
__lowerCAmelCase = False
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowerCamelCase_ , use_timestep_embedding=lowerCamelCase_ , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
UpperCamelCase = IPNDMScheduler()
UpperCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any]=0 ):
"""simple docstring"""
if str(lowerCamelCase_ ).startswith("""mps""" ):
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCamelCase = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = DanceDiffusionPipeline(**lowerCamelCase_ )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
UpperCamelCase = pipe(**lowerCamelCase_ )
UpperCamelCase = output.audios
UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCamelCase = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = torch_device
UpperCamelCase = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(generator=lowerCamelCase_ , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
UpperCamelCase = output.audios
UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = torch_device
UpperCamelCase = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(generator=lowerCamelCase_ , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
UpperCamelCase = output.audios
UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
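Distilled from the slow tests above, a minimal generation sketch for the pipeline under test (a GPU is recommended; the second test shows the half-precision variant):

import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

generator = torch.manual_seed(0)
output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
audio = output.audios[0]  # numpy array of shape (2, sample_count): a stereo waveform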
| 343 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.Conv2D(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(3_2, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
_SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
classifier.fit(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(6_4, 6_4)
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
_SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
_SCREAMING_SNAKE_CASE = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] < 0.5:
_SCREAMING_SNAKE_CASE = """Normal"""
else:
_SCREAMING_SNAKE_CASE = """Abnormality detected"""
| 343 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowerCamelCase : List[str] = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure( config : List[str] ) -> Union[str, Any]:
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption( parser : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish( session : Any , exitstatus : Optional[int] ) -> int:
'''simple docstring'''
if exitstatus == 5:
session.exitstatus = 0
# Doctest custom flag to ignore output.
_lowerCamelCase : Any = doctest.register_optionflag('''IGNORE_RESULT''')
_lowerCamelCase : Any = doctest.OutputChecker
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
def A_ ( self : Optional[Any], _UpperCAmelCase : Any, _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
_lowerCamelCase : Dict = CustomOutputChecker
_lowerCamelCase : List[Any] = HfDoctestModule
_lowerCamelCase : Dict = HfDocTestParser
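The IGNORE_RESULT flag registered above lets a doctest assert only that a statement runs, not what it prints. A minimal standalone sketch of the same mechanism:

import doctest

IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

class IgnoreResultChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True  # accept any output when the flag is set
        return super().check_output(want, got, optionflags)

doctest.OutputChecker = IgnoreResultChecker  # same monkey-patch as above

def sample():
    """
    >>> 1 + 1  # doctest: +IGNORE_RESULT
    3
    """

# Reports zero failures even though the expected output is wrong:
doctest.run_docstring_examples(sample, {}, verbose=False)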
| 191 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def _a ( SCREAMING_SNAKE_CASE__ : SplitDict ) -> str:
'''simple docstring'''
split_dict_yaml_list = split_dict._to_yaml_list()
assert len(split_dict ) == len(split_dict_yaml_list )
reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
split_info.dataset_name = None
# the split name of split_dict takes over the name of the split info object
split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=SCREAMING_SNAKE_CASE__ ), SplitInfo(dataset_name="my_dataset" )] )
def _a ( SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
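A sketch of the round trip the first test exercises (it relies on the same private helpers, so the exact API may shift between datasets versions):

from datasets.splits import SplitDict, SplitInfo

splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
yaml_list = splits._to_yaml_list()            # one plain dict per split
reloaded = SplitDict._from_yaml_list(yaml_list)
assert reloaded["train"].num_examples == 42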
| 191 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[Any] = '''falcon'''
__lowercase : Optional[int] = ['''past_key_values''']
def __init__( self ,__UpperCAmelCase=6_5024 ,__UpperCAmelCase=4544 ,__UpperCAmelCase=32 ,__UpperCAmelCase=71 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=True ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=None ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=11 ,__UpperCAmelCase=11 ,**__UpperCAmelCase ,) -> Union[str, Any]:
lowerCAmelCase__ : List[str] = vocab_size
# Backward compatibility with n_embed kwarg
lowerCAmelCase__ : List[str] = kwargs.pop("""n_embed""" ,__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = hidden_size if n_embed is None else n_embed
lowerCAmelCase__ : List[Any] = num_hidden_layers
lowerCAmelCase__ : Optional[Any] = num_attention_heads
lowerCAmelCase__ : str = layer_norm_epsilon
lowerCAmelCase__ : int = initializer_range
lowerCAmelCase__ : str = use_cache
lowerCAmelCase__ : str = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : Tuple = bos_token_id
lowerCAmelCase__ : Union[str, Any] = eos_token_id
lowerCAmelCase__ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
lowerCAmelCase__ : int = alibi
lowerCAmelCase__ : Any = new_decoder_architecture
lowerCAmelCase__ : Optional[int] = multi_query # Ignored when new_decoder_architecture is True
lowerCAmelCase__ : Union[str, Any] = parallel_attn
lowerCAmelCase__ : str = bias
super().__init__(bos_token_id=__UpperCAmelCase ,eos_token_id=__UpperCAmelCase ,**__UpperCAmelCase )
@property
def UpperCAmelCase_ ( self ) -> str:
return self.hidden_size // self.num_attention_heads
@property
def UpperCAmelCase_ ( self ) -> Tuple:
return not self.alibi
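The renamed properties at the end correspond to head_dim and rotary in the released FalconConfig. A sketch using the defaults from __init__ above:

from transformers import FalconConfig

config = FalconConfig()  # defaults: hidden_size=4544, num_attention_heads=71
assert config.head_dim == 4544 // 71        # the first property above
assert config.rotary == (not config.alibi)  # the second property; alibi defaults to False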
| 37 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Union[str, Any]:
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = features.copy() if features else default_expected_features
lowerCAmelCase = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase = TextDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
if issubclass(snake_case__ , snake_case__ ):
lowerCAmelCase = text_path
elif issubclass(snake_case__ , snake_case__ ):
lowerCAmelCase = [text_path]
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Optional[Any]:
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase = TextDatasetReader({'''train''': text_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
lowerCAmelCase = tmp_path / '''cache'''
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = features.copy() if features else default_expected_features
lowerCAmelCase = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase = TextDatasetReader({'''train''': text_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any:
if split:
lowerCAmelCase = {split: text_path}
else:
lowerCAmelCase = '''train'''
lowerCAmelCase = {'''train''': text_path, '''test''': text_path}
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
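The TextDatasetReader exercised above is what backs the public load_dataset("text", ...) entry point; a usage sketch (the file path is a placeholder, one example per line, single "text" column):

from datasets import load_dataset

dataset = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
assert dataset.column_names == ["text"]
print(dataset[0]["text"])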
| 338 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase: Tuple = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: int = ["MobileViTFeatureExtractor"]
__lowercase: Union[str, Any] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Any = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Dict = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__lowercase: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31 |
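The __init__.py above defers the heavy torch/TF imports until an attribute is first touched. A minimal standalone sketch of the same lazy-import idea, independent of the _LazyModule helper (here the import-structure keys are full module paths):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to submodule members on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> module that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(module_name)  # imported only now
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value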
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = ""
try:
with open(_UpperCamelCase , "rb" ) as binary_file:
UpperCamelCase__ = binary_file.read()
for dat in data:
UpperCamelCase__ = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = {"0": "0", "1": "1"}
UpperCamelCase__ , UpperCamelCase__ = "", ""
UpperCamelCase__ = len(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ = lexicon[curr_string]
result += last_match_id
UpperCamelCase__ = last_match_id + "0"
if math.loga(_UpperCamelCase ).is_integer():
UpperCamelCase__ = {}
for curr_key in list(_UpperCamelCase ):
UpperCamelCase__ = lexicon.pop(_UpperCamelCase )
UpperCamelCase__ = new_lex
UpperCamelCase__ = last_match_id + "1"
index += 1
UpperCamelCase__ = ""
return result
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = 8
try:
with open(_UpperCamelCase , "wb" ) as opened_file:
UpperCamelCase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ = data_bits[counter:]
UpperCamelCase__ = data_bits[counter + 1 :]
return data_bits
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = read_file_binary(_UpperCamelCase )
UpperCamelCase__ = remove_prefix(_UpperCamelCase )
UpperCamelCase__ = decompress_data(_UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 31 | 1 |
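The renaming in the listing above drops the dictionary keys inside decompress_data, which makes the lexicon-regrouping step unreadable. Below is a reconstructed sketch of the intended LZ78-style bit-string decoder; the key handling is an assumption based on the loop structure above and the matching compressor from the same algorithms collection:

import math

def decompress_bits(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for bit in data_bits:
        curr_string += bit
        if curr_string not in lexicon:
            continue
        last_match = lexicon[curr_string]
        result += last_match
        lexicon[curr_string] = last_match + "0"
        # Once the lexicon crosses a power of two, every key gains a leading "0"
        # so that key lengths track the growing code width.
        if math.log2(index).is_integer():
            lexicon = {"0" + key: value for key, value in lexicon.items()}
        lexicon[bin(index)[2:]] = last_match + "1"
        index += 1
        curr_string = ""
    return result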
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int = 13 , SCREAMING_SNAKE_CASE : int = 64 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 3 , SCREAMING_SNAKE_CASE : int = 3 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 128 , SCREAMING_SNAKE_CASE : Dict=[16, 32, 64, 128] , SCREAMING_SNAKE_CASE : int = 7 , SCREAMING_SNAKE_CASE : int = 4 , SCREAMING_SNAKE_CASE : int = 37 , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 10 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 128 , SCREAMING_SNAKE_CASE : List[int] = [2, 2, 2, 2] , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , ):
lowercase__ : int = parent
lowercase__ : List[str] = batch_size
lowercase__ : List[Any] = image_size
lowercase__ : Union[str, Any] = patch_size
lowercase__ : Any = num_channels
lowercase__ : Union[str, Any] = is_training
lowercase__ : Union[str, Any] = use_labels
lowercase__ : Tuple = hidden_size
lowercase__ : Tuple = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : int = hidden_act
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : Optional[int] = attention_probs_dropout_prob
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : str = initializer_range
lowercase__ : Optional[int] = encoder_stride
lowercase__ : Optional[int] = num_attention_outputs
lowercase__ : Any = embed_dim
lowercase__ : Dict = embed_dim + 1
lowercase__ : Union[str, Any] = resolution
lowercase__ : Optional[int] = depths
lowercase__ : Union[str, Any] = hidden_sizes
lowercase__ : Dict = dim
lowercase__ : List[str] = mlp_expansion_ratio
def snake_case ( self : Dict ):
lowercase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : str = None
if self.use_labels:
lowercase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[str] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Optional[int] ):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Optional[Any] = TFEfficientFormerModel(config=__UpperCAmelCase )
lowercase__ : int = model(__UpperCAmelCase , training=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[int] = self.type_sequence_label_size
lowercase__ : Union[str, Any] = TFEfficientFormerForImageClassification(__UpperCAmelCase )
lowercase__ : Dict = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ : Union[str, Any] = 1
lowercase__ : Any = TFEfficientFormerForImageClassification(__UpperCAmelCase )
lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : List[str] = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self : Tuple ):
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
lowercase__ : List[str] = config_and_inputs
lowercase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__(_a , _a , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowercase_ = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[Any] ):
lowercase__ : Any = TFEfficientFormerModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(
self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def snake_case ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def snake_case ( self : int ):
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def snake_case ( self : Optional[Any] ):
pass
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(__UpperCAmelCase )
lowercase__ : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : List[str] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def snake_case ( self : str ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : int = model_class(__UpperCAmelCase )
lowercase__ : Optional[int] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase )
lowercase__ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Optional[Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
if hasattr(self.model_tester , "encoder_seq_length" ):
lowercase__ : Optional[Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
lowercase__ : Any = seq_length * self.model_tester.chunk_length
else:
lowercase__ : str = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowercase__ : Union[str, Any] = outputs.decoder_hidden_states
self.assertIsInstance(__UpperCAmelCase , (list, tuple) )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
lowercase__ : int = getattr(self.model_tester , "seq_length" , __UpperCAmelCase )
lowercase__ : Any = getattr(self.model_tester , "decoder_seq_length" , __UpperCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
lowercase__ : str = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def snake_case ( self : str ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase )
def snake_case ( self : str ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def snake_case ( self : int ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[int] = TFEfficientFormerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
lowercase__ : Dict = getattr(self.model_tester , "seq_length" , __UpperCAmelCase )
lowercase__ : List[str] = getattr(self.model_tester , "encoder_seq_length" , __UpperCAmelCase )
lowercase__ : List[Any] = getattr(self.model_tester , "key_length" , __UpperCAmelCase )
lowercase__ : List[str] = getattr(self.model_tester , "chunk_length" , __UpperCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
lowercase__ : Tuple = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : Dict = False
lowercase__ : Union[str, Any] = True
lowercase__ : Any = model_class(__UpperCAmelCase )
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase )
lowercase__ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : Dict = model_class(__UpperCAmelCase )
lowercase__ : int = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase )
lowercase__ : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def snake_case ( self : Optional[int] ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowercase__ : Any = model_class(__UpperCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowercase__ : str = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__UpperCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowercase__ : Optional[int] = model(__UpperCAmelCase )
self.assertTrue(outputs_dict is not None )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Tuple ):
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def snake_case ( self : Optional[int] ):
lowercase__ : Any = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
lowercase__ : Optional[int] = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Optional[Any] = image_processor(images=__UpperCAmelCase , return_tensors="tf" )
# forward pass
lowercase__ : Tuple = model(**__UpperCAmelCase , training=__UpperCAmelCase )
# verify the logits
lowercase__ : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
lowercase__ : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def snake_case ( self : Optional[Any] ):
lowercase__ : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
lowercase__ : int = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : str = image_processor(images=__UpperCAmelCase , return_tensors="tf" )
# forward pass
lowercase__ : Union[str, Any] = model(**__UpperCAmelCase , training=__UpperCAmelCase )
# verify the logits
lowercase__ : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
lowercase__ : Any = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
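Condensed from the slow tests above, an inference sketch for the TensorFlow EfficientFormer checkpoints:

import tensorflow as tf
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")  # any RGB image
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits  # shape (1, 1000): ImageNet-1k classes
predicted_class = int(tf.argmax(logits, axis=-1)[0])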
| 130 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline
UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
UpperCAmelCase : Dict = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCAmelCase : Optional[int] = False
@property
def __snake_case ( self : Optional[Any]):
return 32
@property
def __snake_case ( self : Dict):
return 32
@property
def __snake_case ( self : Dict):
return self.time_input_dim
@property
def __snake_case ( self : Any):
return self.time_input_dim * 4
@property
def __snake_case ( self : str):
return 100
@property
def __snake_case ( self : str):
torch.manual_seed(0)
a : str = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
a : Dict = UNetaDConditionModel(**__UpperCAmelCase)
return model
@property
def __snake_case ( self : str):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __snake_case ( self : Union[str, Any]):
torch.manual_seed(0)
a : Dict = VQModel(**self.dummy_movq_kwargs)
return model
def __snake_case ( self : Optional[Any]):
a : Optional[Any] = self.dummy_unet
a : int = self.dummy_movq
a : str = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , )
a : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0):
a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
__UpperCAmelCase)
# create hint
a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
if str(__UpperCAmelCase).startswith("mps"):
a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase)
else:
a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
a : str = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def __snake_case ( self : Dict):
a : str = "cpu"
a : Tuple = self.get_dummy_components()
a : Dict = self.pipeline_class(**__UpperCAmelCase)
a : Optional[int] = pipe.to(__UpperCAmelCase)
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase))
a : Any = output.images
a : Any = pipe(
**self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0]
a : Union[str, Any] = image[0, -3:, -3:, -1]
a : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a : Tuple = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Optional[int]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        # Convert the PIL hint to a (1, 3, H, W) float tensor in [0, 1].
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        # torch_device comes from diffusers' testing utils (the import is above this excerpt).
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, hint=hint, generator=generator, num_inference_steps=100, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 40 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
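# Torch-specific imports are guarded so this test module can still be collected when torch is not installed.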
if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel
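# The tester below builds a tiny MaskFormerSwin config and random inputs so the model checks run quickly on CPU.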
class MaskFormerSwinModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3], ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify that an invalid out_features value raises a ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    # Common tests that don't apply to this backbone-only model are disabled via the mixin flags below.
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overridden because this backbone returns hidden states as tuples of tensors per stage.
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions) | 357 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bf16_available,
is_bnb_available,
is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fp32,
convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
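# The DeepSpeed wrappers below can only be imported when the deepspeed package itself is installed.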
if is_deepspeed_available():
    from .deepspeed import (
        DeepSpeedEngineWrapper,
        DeepSpeedOptimizerWrapper,
        DeepSpeedSchedulerWrapper,
        DummyOptim,
        DummyScheduler,
        HfDeepSpeedConfig,
    )
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers | 171 | 0 |