code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Regex matching English articles; used by normalize_answer() below.
ARTICLES_REGEX = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
# Parsed CLI options; assigned in the __main__ block, read by main().
# Defect fixed: both constants were bound to the same placeholder name
# while the rest of the file reads ARTICLES_REGEX and OPTS.
OPTS = None
def parse_args():
    """Parse command-line options for the SQuAD 2.0 evaluation script.

    Fixes: the parser object was never bound (assigned to a placeholder,
    then used as ``parser``); ``type``/``default`` placeholders restored.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        # No arguments at all: show usage instead of an argparse error.
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id to True iff it has at least one gold answer text.

    Fixes: the result dict was never populated (each entry was assigned to
    a placeholder local instead of ``qid_to_has_ans[qa["id"]]``).
    """
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lowercase text and strip punctuation, English articles and extra whitespace.

    Fixes: every inner helper took a placeholder parameter but read ``text``;
    the article regex is applied directly here instead of relying on the
    mis-named module-level constant.
    """

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text, flags=re.UNICODE)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """Return the whitespace tokens of the normalized answer (empty list for falsy input)."""
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized gold and predicted answers match exactly, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    """Token-level F1 between a gold answer and a prediction.

    Fixes: broken placeholder locals restored (gold/pred token lists,
    precision/recall denominators).
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    """Compute un-thresholded exact-match and F1 scores per question id.

    Fixes: per-qid scores were assigned to placeholder locals instead of the
    result dicts, so both returned dicts were always empty.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f'Missing prediction for {qid}')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Zero out (or credit) scores for questions the model deems unanswerable.

    A question whose no-answer probability exceeds the threshold is treated as
    predicted-no-answer: it scores 1 iff the question truly has no answer.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into {'exact', 'f1', 'total'} percentages.

    When ``qid_list`` is given, only those question ids are averaged.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every metric from ``new_eval`` into ``main_eval`` under ``prefix_<key>``.

    Fixes: the assignment into ``main_eval`` was lost (value bound to a
    placeholder local), making the function a no-op.
    """
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Render a precision-recall step curve and save it to ``out_image``.

    Requires matplotlib's ``plt`` to be imported at module scope (done in the
    __main__ block when --out-image-dir is set).
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Compute average precision over the no-answer-probability ranking.

    Fixes: the sort key lambda referenced an unbound ``k``; locals restored
    from the surrounding usage (``recalls``, ``precisions``, ``avg_prec``).
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Produce PR curves (exact, F1, oracle) and merge their AP into ``main_eval``.

    Fixes: oracle scores used ``float(<placeholder>)`` instead of ``float(v)``;
    intermediate results were never bound to named locals.
    """
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    # Oracle: score 1 exactly on answerable questions.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given question ids."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    # Normalize bar heights so they sum to 1 (proportion of dataset).
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f'Histogram of no-answer probability: {name}')
    plt.savefig(os.path.join(image_dir, f'na_prob_hist_{name}.png'))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Sweep the no-answer threshold and return (best score %, best threshold).

    Starts from the score obtained by predicting no-answer for everything,
    then walks questions in increasing no-answer probability.
    """
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Predicting an answer on an unanswerable question loses a point.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Store the best thresholded exact/F1 scores and thresholds in ``main_eval``.

    Fixes: the four results were bound to placeholder locals instead of the
    ``best_exact``/``best_f1`` keys of ``main_eval``.
    """
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    """Run the full SQuAD 2.0 evaluation and emit metrics to stdout or a file."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
    dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        # Without a no-answer probability file, treat everything as answerable.
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        # Import lazily and force a headless backend so plotting works
        # without a display.
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 88 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse CLI arguments for the TPU launch helper.

    Fixes: the parser was never bound; ``type`` placeholders restored to
    ``int``/``str``.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Import the training script as a module and spawn it on TPU cores via xmp."""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the training script sees its own arguments.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
    main()
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, collecting complete boards in ``boards``.

    Fixes: all five parameters shared one placeholder name; the recursive
    call and collision sets are restored from the in-body references.
    """
    # If row equals the board size, every row holds a queen: record the board.
    row = len(possible_board)
    if row == n:
        # Convert column indices like [1, 3, 0, 2] into printable rows
        # such as '. Q . . '.
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return
    # Try each column in the current row.
    for col in range(n):
        # Reject vertical collisions (same column) and the two diagonals:
        # 45º uses row - col, 135º uses row + col.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # Recurse with the queen placed at (row, col).
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    """Solve the n-queens problem, print every board and the solution count."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
| 701 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix.

    Returns (lower, upper) with unit diagonal on ``lower``.
    Raises ValueError for non-square input and ArithmeticError when a zero
    pivot makes the decomposition impossible.

    Fixes: the shape tuple was unpacked into a single placeholder, and the
    two loop nests had lost their distinct ranges (j < i for the lower part,
    j >= i for the upper part).
    """
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # Fill row i of the lower-triangular factor (strictly below diagonal).
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # Fill row i of the upper-triangular factor (diagonal and above).
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    """Rail-fence encrypt ``input_string`` using ``key`` rails.

    Raises ValueError when the key is not positive; a key of 1 (or a string
    shorter than the key) is returned unchanged.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Rail-fence decrypt ``input_string`` that was encrypted with ``key`` rails.

    Builds a placeholder zigzag grid to learn each rail's length, slices the
    ciphertext into rails, then reads the zigzag back off.
    """
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Try every possible key and return {key: decrypted text}.

    Fixes: each decryption was bound to a placeholder local, so the results
    dict was returned empty.
    """
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact GELU activation using the Gaussian CDF (erf form)."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    """Tanh-approximated GELU (the 'new' form with the 0.044715 cubic term)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    """Fast GELU approximation (0.7978845608 is sqrt(2/pi))."""
    x = tf.convert_to_tensor(x)
    coeff_cubic = tf.cast(0.044715, x.dtype)
    coeff_sqrt = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_sqrt * (1.0 + coeff_cubic * x * x)))
def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_aa(x):
    """GELU clipped to [-10, 10] (registered below as 'gelu_10')."""
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    """Gated Linear Unit: split ``x`` in two along ``axis`` and gate the first
    half with the sigmoid of the second.

    Fixes: the split halves were unpacked into one placeholder and the gate
    referenced an unbound name.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
# On TF >= 2.4, Keras ships a native (optionally approximate) GELU; wrap it so
# 'gelu_new' keeps its tanh-approximated behavior. Otherwise fall back to the
# local implementations.
# Fixes: all four bindings were assigned to the same placeholder `__A`, and the
# wrapper passed approximate=<placeholder> instead of approximate=True.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

# Name -> activation callable registry, read by get_tf_activation() below.
ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    """Look up an activation function by name in the ACTaFN registry.

    Raises KeyError for unknown names.
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''')
| 656 | 1 |
'''simple docstring'''
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted sub-lists input_list[low:mid] and input_list[mid:high+1]
    back into input_list in place, returning it.

    Fixes: the merged result was bound to a placeholder local instead of
    being sliced back into ``input_list``, making the merge a no-op.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        # Pop from whichever side currently has the smaller head.
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort; returns a new sorted list.

    Fixes: all intermediate values (copy of the input, low/mid/high indices,
    the doubling width ``p``) were bound to the same placeholder local.
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 542 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCAmelCase(metaclass=DummyObject):
    """Placeholder object raising an informative error when torch/torchsde
    are missing (standard dummy-backend pattern).

    Fixes: the metaclass placeholder restored to the DummyObject imported
    above, and the two colliding classmethod names separated.
    NOTE(review): classmethod names reconstructed as the conventional
    from_config/from_pretrained pair — verify against the real module.
    """

    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 542 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()


class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for LDMTextToImagePipeline built from tiny components.

    Fixes: base-class/attribute placeholders restored, duplicate parameter
    names (a SyntaxError) renamed, and unbound `__lowerCamelCase` references
    replaced by the obvious locals.
    NOTE(review): attribute names (pipeline_class, params, ...) and the final
    False flag are reconstructed from the PipelineTesterMixin convention —
    verify against the mixin's expected attributes.
    """

    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny unet/scheduler/vae/text-encoder/tokenizer set."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; mps needs a CPU-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    """Slow GPU integration test against the full CompVis/ldm-text2im-large-256
    checkpoint (duplicate-parameter and placeholder names fixed)."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        # NOTE(review): the dtype default was obfuscated ('floataa'); float32
        # reconstructed — confirm against the upstream test file.
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    """Nightly GPU test comparing a full 50-step generation against a stored
    reference image (duplicate-parameter and placeholder names fixed)."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        # NOTE(review): dtype default reconstructed as float32 — verify.
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 4 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
snake_case = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
snake_case = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
snake_case = dict(zip(vocab, range(len(vocab))))
snake_case = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(tmpdirname)
snake_case = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
snake_case = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
snake_case = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
snake_case = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
snake_case = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_0_0_0,
tgt_vocab_size=1_0_0_0,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
snake_case = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
snake_case = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
snake_case = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 103 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# NOTE(review): obfuscation collapsed three distinct module constants
# (``logger``, ``MODEL_CONFIG_CLASSES``, ``MODEL_TYPES``) to the single name
# ``A_`` — each assignment clobbers the previous one, and the reference to
# ``MODEL_CONFIG_CLASSES`` below is unresolved as written.
A_ : Optional[Any] = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

A_ : int = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
A_ : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def UpperCamelCase__(__magic_name__: str) -> Optional[int]:
    """Load the image file at ``__magic_name__`` and return it as an RGB PIL image.

    NOTE(review): the obfuscated original bound the opened image to a
    throwaway local and then returned the undefined name ``im``; it also
    re-opened the path instead of reading from the already-open handle.
    """
    with open(__magic_name__, "rb") as f:
        im = Image.open(f)
        # Convert inside the ``with`` block so PIL's lazy loader still has
        # access to the open file handle.
        return im.convert("RGB")
@dataclass
class __snake_case:
    """Arguments pertaining to what data to feed the model for training/eval.

    NOTE(review): name obfuscation assigned every field to the same class
    attribute (``lowerCamelCase__``) with no type annotations, so
    ``@dataclass`` registers no fields, and ``__UpperCamelCase`` reads
    attributes (``dataset_name`` …) that are never defined.  Restore the
    upstream attribute names and annotations before relying on this class.
    """

    # Upstream defaults were ``None`` (or a literal); the obfuscated source
    # referenced an undefined name ``__SCREAMING_SNAKE_CASE`` instead, which
    # raised NameError as soon as the module was imported.
    lowerCamelCase__ = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    lowerCamelCase__ = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    lowerCamelCase__ = field(default=None, metadata={"help": "A folder containing the training data."})
    lowerCamelCase__ = field(default=None, metadata={"help": "A folder containing the validation data."})
    lowerCamelCase__ = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    lowerCamelCase__ = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    lowerCamelCase__ = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __UpperCamelCase(self):
        # Fail fast when neither a hub dataset nor a local data folder was given.
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class __snake_case:
    """Arguments for selecting the model/config/image processor to fine-tune.

    NOTE(review): name obfuscation assigned every field to the same class
    attribute (``lowerCamelCase__``) with no type annotations, so
    ``@dataclass`` registers no fields.  Restore the upstream attribute names
    and annotations before relying on this class.
    """

    # Upstream literal defaults are restored below; fields whose obfuscated
    # default referenced the undefined name ``__SCREAMING_SNAKE_CASE`` (a
    # NameError at import time) now default to ``None``.
    lowerCamelCase__ = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    lowerCamelCase__ = field(
        default=None,
        # NOTE(review): upstream appends ``", ".join(MODEL_TYPES)`` to this
        # help text; the obfuscated source joined an undefined name instead.
        metadata={"help": "If training from scratch, pass a model type from the list: "},
    )
    lowerCamelCase__ = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    lowerCamelCase__ = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    lowerCamelCase__ = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    lowerCamelCase__ = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    lowerCamelCase__ = field(
        default=None,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    lowerCamelCase__ = field(
        default=None,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[int] = torch.stack([example["""pixel_values"""] for example in examples] )
snake_case__ : Optional[Any] = torch.tensor([example["""labels"""] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def UpperCamelCase__ ( ) -> List[str]:
    """Entry point: parse arguments, build the dataset and model, then train/evaluate.

    NOTE(review): obfuscation collapsed distinct locals to ``snake_case__`` and
    parameters to ``__magic_name__`` — references such as ``parser``,
    ``training_args``, ``model_args``, ``data_args``, ``dataset``, ``labels``,
    ``metric``, ``image_processor``, ``trainer`` … are unresolved as written,
    and ``ModelArguments`` / ``DataTrainingArguments`` are not defined under
    those names in this module.  Restore upstream names before running.
    """
    snake_case__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        snake_case__ , snake_case__ , snake_case__ : Optional[Any] = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_image_classification""" , __magic_name__ , __magic_name__ )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    snake_case__ : Tuple = training_args.get_process_log_level()
    logger.setLevel(__magic_name__ )
    transformers.utils.logging.set_verbosity(__magic_name__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    # NOTE(review): ``training_args.fpaa`` looks like a garbling of ``fp16`` — confirm.
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
    logger.info(f"Training/evaluation parameters {training_args}" )
    # Detecting last checkpoint.
    snake_case__ : str = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        snake_case__ : Tuple = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        snake_case__ : List[Any] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="""image-classification""" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        snake_case__ : Optional[Any] = {}
        if data_args.train_dir is not None:
            snake_case__ : str = os.path.join(data_args.train_dir , """**""" )
        if data_args.validation_dir is not None:
            snake_case__ : Tuple = os.path.join(data_args.validation_dir , """**""" )
        snake_case__ : Optional[Any] = load_dataset(
            """imagefolder""" , data_files=__magic_name__ , cache_dir=model_args.cache_dir , task="""image-classification""" , )
    # If we don't have a validation split, split off a percentage of train as validation.
    snake_case__ : int = None if """validation""" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , __magic_name__ ) and data_args.train_val_split > 0.0:
        snake_case__ : List[Any] = dataset["""train"""].train_test_split(data_args.train_val_split )
        snake_case__ : Any = split["""train"""]
        snake_case__ : str = split["""test"""]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    snake_case__ : Dict = dataset["""train"""].features["""labels"""].names
    snake_case__ , snake_case__ : Tuple = {}, {}
    for i, label in enumerate(__magic_name__ ):
        snake_case__ : List[str] = str(__magic_name__ )
        snake_case__ : str = label
    # Load the accuracy metric from the datasets package
    snake_case__ : Union[str, Any] = evaluate.load("""accuracy""" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    # NOTE(review): the parameter is named ``__magic_name__`` but the body reads
    # ``p`` (the EvalPrediction) — unresolved as written; restore the upstream
    # parameter name.
    def compute_metrics(__magic_name__ : int ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    snake_case__ : int = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(__magic_name__ ) , labelaid=__magic_name__ , idalabel=__magic_name__ , finetuning_task="""image-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    snake_case__ : Optional[Any] = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    snake_case__ : List[str] = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        snake_case__ : Optional[int] = image_processor.size["""shortest_edge"""]
    else:
        snake_case__ : Optional[int] = (image_processor.size["""height"""], image_processor.size["""width"""])
    snake_case__ : str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
    snake_case__ : List[str] = Compose(
        [
            RandomResizedCrop(__magic_name__ ),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    snake_case__ : str = Compose(
        [
            Resize(__magic_name__ ),
            CenterCrop(__magic_name__ ),
            ToTensor(),
            normalize,
        ] )
    # NOTE(review): in both transform callbacks below the transformed pixel
    # values are bound to a throwaway local instead of
    # ``example_batch["pixel_values"]``, so the batch is returned unmodified —
    # restore the upstream assignment.
    def train_transforms(__magic_name__ : Any ):
        snake_case__ : Any = [
            _train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]
        ]
        return example_batch
    def val_transforms(__magic_name__ : Union[str, Any] ):
        snake_case__ : Optional[Any] = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            snake_case__ : Tuple = (
                dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(__magic_name__ )
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            snake_case__ : str = (
                dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(__magic_name__ )
    # Initalize our trainer
    snake_case__ : int = Trainer(
        model=__magic_name__ , args=__magic_name__ , train_dataset=dataset["""train"""] if training_args.do_train else None , eval_dataset=dataset["""validation"""] if training_args.do_eval else None , compute_metrics=__magic_name__ , tokenizer=__magic_name__ , data_collator=__magic_name__ , )
    # Training
    if training_args.do_train:
        snake_case__ : List[Any] = None
        if training_args.resume_from_checkpoint is not None:
            snake_case__ : Optional[int] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            snake_case__ : int = last_checkpoint
        snake_case__ : str = trainer.train(resume_from_checkpoint=__magic_name__ )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        snake_case__ : Tuple = trainer.evaluate()
        trainer.log_metrics("""eval""" , __magic_name__ )
        trainer.save_metrics("""eval""" , __magic_name__ )
    # Write model card and (optionally) push to hub
    snake_case__ : Optional[int] = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """image-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""image-classification""", """vision"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**__magic_name__ )
    else:
        trainer.create_model_card(**__magic_name__ )


if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined under that name in this obfuscated
    # module (the entry point above is named ``UpperCamelCase__``).
    main()
| 419 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__(a: int, b: int) -> tuple[int, int]:
    """Extended Euclidean algorithm.

    Returns a pair ``(x, y)`` such that ``a * x + b * y == gcd(a, b)``.

    NOTE(review): the obfuscated original declared two parameters with the
    same name (a SyntaxError) and recursed through the undefined symbol
    ``extended_euclid``; this restores the canonical recursion.
    """
    if b == 0:
        # gcd(a, 0) == a == a * 1 + 0 * 0
        return (1, 0)
    (x, y) = UpperCamelCase__(b, a % b)
    k = a // b
    return (y, x - k * y)
def UpperCamelCase__(n1: int, r1: int, n2: int, r2: int) -> int:
    """Chinese remainder theorem for two coprime moduli.

    Returns the unique ``0 <= n < n1 * n2`` with ``n % n1 == r1`` and
    ``n % n2 == r2``.

    NOTE(review): the obfuscated original declared four identically-named
    parameters (a SyntaxError) and called the undefined ``extended_euclid``;
    a local copy of the helper keeps this block self-contained.
    """

    def _extended_euclid(a: int, b: int) -> tuple[int, int]:
        # Returns (x, y) with a * x + b * y == gcd(a, b).
        if b == 0:
            return (1, 0)
        (x, y) = _extended_euclid(b, a % b)
        k = a // b
        return (y, x - k * y)

    (x, y) = _extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def UpperCamelCase__(a: int, n: int) -> int:
    """Return the multiplicative inverse of ``a`` modulo ``n``.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError) and called the undefined ``extended_euclid``; a local
    copy of the helper keeps this block self-contained.
    """

    def _extended_euclid(x: int, y: int) -> tuple[int, int]:
        # Returns (u, v) with x * u + y * v == gcd(x, y).
        if y == 0:
            return (1, 0)
        (u, v) = _extended_euclid(y, x % y)
        k = x // y
        return (v, u - k * v)

    (b, x) = _extended_euclid(a, n)
    if b < 0:
        # Normalize the Bezout coefficient into [0, n).
        b = (b % n + n) % n
    return b
def UpperCamelCase__(n1: int, r1: int, n2: int, r2: int) -> int:
    """Chinese remainder theorem via modular inverses (two coprime moduli).

    Returns the unique ``0 <= n < n1 * n2`` with ``n % n1 == r1`` and
    ``n % n2 == r2``.

    NOTE(review): the obfuscated original declared four identically-named
    parameters (a SyntaxError) and called the undefined ``invert_modulo``;
    local helper copies keep this block self-contained.
    """

    def _extended_euclid(x: int, y: int) -> tuple[int, int]:
        # Returns (u, v) with x * u + y * v == gcd(x, y).
        if y == 0:
            return (1, 0)
        (u, v) = _extended_euclid(y, x % y)
        k = x // y
        return (v, u - k * v)

    def _invert_modulo(a: int, n: int) -> int:
        # Multiplicative inverse of a modulo n.
        (b, _) = _extended_euclid(a, n)
        if b < 0:
            b = (b % n + n) % n
        return b

    x, y = _invert_modulo(n1, n2), _invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
    from doctest import testmod

    # NOTE(review): the functions above are all named ``UpperCamelCase__`` in
    # this obfuscated module — none exist under the names passed to testmod.
    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
| 419 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case : int = logging.get_logger(__name__)

# NOTE(review): obfuscation collapsed the distinct module constants (logger,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, UNICODE_VOCAB_SIZE, PAD, CLS, SEP,
# BOS, MASK, RESERVED, SPECIAL_CODEPOINTS, SPECIAL_CODEPOINTS_BY_NAME) into
# the single name ``__snake_case`` — the names used below (``CLS``, ``SEP``,
# ``SPECIAL_CODEPOINTS`` …) are unresolved as written.
__snake_case : Optional[Any] = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
__snake_case : Any = 111_4112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
__snake_case : int = 0
__snake_case : Optional[int] = 0XE_0_0_0
__snake_case : Any = 0XE_0_0_1
__snake_case : Dict = 0XE_0_0_2
__snake_case : int = 0XE_0_0_3
__snake_case : Tuple = 0XE_0_0_4

# Maps special codepoints to human-readable names.
__snake_case : Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
__snake_case : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class A ( a ):
    """Character-level (CANINE-style) tokenizer: token ids are Unicode codepoints.

    NOTE(review): this class cannot be created as written — the base class
    ``a`` is unresolved (upstream it derives from ``PreTrainedTokenizer``),
    ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`` is undefined at module level,
    every ``__init__`` default of the form ``chr(snake_case_)`` refers to the
    very parameter being defined, and all methods share the mangled name
    ``__lowerCAmelCase`` (later defs shadow earlier ones).  The leading
    comments below each method record the upstream intent.
    """

    __UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , snake_case_=chr(snake_case_ ) , snake_case_=chr(snake_case_ ) , snake_case_=chr(snake_case_ ) , snake_case_=chr(snake_case_ ) , snake_case_=chr(snake_case_ ) , snake_case_=chr(snake_case_ ) , snake_case_=False , snake_case_=2_0_4_8 , **snake_case_ , ) -> Tuple:
        # Wrap each special token as an AddedToken when passed in as a string.
        _a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
        _a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
        _a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
        _a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
        _a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        _a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
        super().__init__(
            bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , model_max_length=snake_case_ , **snake_case_ , )
        # Creates a mapping for looking up the IDs of special symbols.
        _a = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            _a = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        _a = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        _a = UNICODE_VOCAB_SIZE
        _a = len(self._special_codepoints )

    # Upstream: ``vocab_size`` property — the full Unicode codepoint space.
    @property
    def __lowerCAmelCase ( self ) -> int:
        return self._unicode_vocab_size

    # Upstream: ``_tokenize`` — split a string into individual characters.
    def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
        return list(snake_case_ )

    # Upstream: ``_convert_token_to_id`` — a character's id is its codepoint.
    def __lowerCAmelCase ( self , snake_case_ ) -> int:
        try:
            return ord(snake_case_ )
        except TypeError:
            raise ValueError(F'''invalid token: \'{token}\'''' )

    # Upstream: ``_convert_id_to_token`` — special codepoints map to their
    # symbolic names, everything else back to chr().
    def __lowerCAmelCase ( self , snake_case_ ) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(snake_case_ )
        except TypeError:
            raise ValueError(F'''invalid id: {index}''' )

    # Upstream: ``convert_tokens_to_string``.
    def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
        return "".join(snake_case_ )

    # Upstream: ``build_inputs_with_special_tokens`` — [CLS] A [SEP] (B [SEP]).
    def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
        _a = [self.sep_token_id]
        _a = [self.cls_token_id]
        _a = cls + token_ids_a + sep
        if token_ids_a is not None:
            result += token_ids_a + sep
        return result

    # Upstream: ``get_special_tokens_mask`` — 1 marks special tokens.
    def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
        _a = [1] + ([0] * len(snake_case_ )) + [1]
        if token_ids_a is not None:
            result += ([0] * len(snake_case_ )) + [1]
        return result

    # Upstream: ``create_token_type_ids_from_sequences`` — 0 for segment A, 1 for B.
    def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
        _a = [self.sep_token_id]
        _a = [self.cls_token_id]
        _a = len(cls + token_ids_a + sep ) * [0]
        if token_ids_a is not None:
            result += len(token_ids_a + sep ) * [1]
        return result

    # Upstream: ``save_vocabulary`` — nothing to persist for a char tokenizer.
    def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[str]:
        return ()
| 131 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _lowercase(size, overlap_pixels, remove_borders=None):
    """Build a uint8 alpha mask for blending one upscaled tile into the canvas.

    The mask is 255 in the tile interior and linearly ramps to 0 over
    ``overlap_pixels`` at each edge; edges listed in ``remove_borders``
    ("l", "r", "t", "b") keep full opacity because they sit on the image
    boundary.

    Args:
        size: (width, height) of the tile including overlap.
        overlap_pixels: width of the blending ramp on each side.
        remove_borders: optional list of border letters to keep opaque.

    Returns:
        2-D ``np.uint8`` array of shape (height, width) after border removal.

    NOTE(review): the obfuscated original declared three identically-named
    parameters (a SyntaxError), used the nonexistent dtype ``np.uinta`` and a
    mutable ``[]`` default; all three are fixed here (``None`` default keeps
    the call signature backward-compatible).
    """
    if remove_borders is None:
        remove_borders = []
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    # Borders that will be stripped keep their full width in the core region.
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    # linear_ramp pads from the edge value down to 0 over overlap_pixels.
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def _lowercase(n, smallest, largest):
    """Clamp ``n`` into the inclusive range [smallest, largest].

    NOTE(review): the obfuscated original declared three identically-named
    parameters (a SyntaxError); this restores the canonical clamp.
    """
    return max(smallest, min(n, largest))
def _lowercase(rect, lo, hi):
    """Clamp an ``(x0, y0, x1, y1)`` rectangle into the bounds ``lo``..``hi``.

    ``lo`` and ``hi`` are ``[x, y]`` pairs; x coordinates are clamped against
    index 0, y coordinates against index 1.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError) and called the undefined ``clamp``; a local helper keeps
    this block self-contained.
    """

    def _clamp(n, smallest, largest):
        return max(smallest, min(n, largest))

    return (
        _clamp(rect[0], lo[0], hi[0]),
        _clamp(rect[1], lo[1], hi[1]),
        _clamp(rect[2], lo[0], hi[0]),
        _clamp(rect[3], lo[1], hi[1]),
    )
def _lowercase(rect, overlap, image_size):
    """Grow ``rect`` by ``overlap`` pixels on every side, clamped to the image.

    Args:
        rect: (x0, y0, x1, y1) tile rectangle.
        overlap: number of pixels to expand on each side.
        image_size: (width, height) bounds to clamp against.

    Returns:
        The expanded, clamped (x0, y0, x1, y1) tuple.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError) and called the undefined ``clamp_rect``; the clamping is
    inlined here so the block stands alone.
    """

    def _clamp(n, smallest, largest):
        return max(smallest, min(n, largest))

    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    # Clamp x coords to [0, width] and y coords to [0, height].
    return (
        _clamp(rect[0], 0, image_size[0]),
        _clamp(rect[1], 0, image_size[1]),
        _clamp(rect[2], 0, image_size[0]),
        _clamp(rect[3], 0, image_size[1]),
    )
def _lowercase(tile, original_image, original_slice, slice_x):
    """Prepend a vertical slice of the (downscaled) original image to ``tile``.

    The slice provides left-context for the upscaler so tile seams blend; it is
    cropped from ``original_image`` resized to the tile's size, starting at
    ``slice_x`` and ``original_slice`` pixels wide, and pasted left of the tile.

    NOTE(review): the obfuscated original declared four identically-named
    parameters (a SyntaxError) and pasted the same unresolved name twice; the
    upstream parameter names are restored.
    """
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def _lowercase(tile, original_image_slice):
    """Crop away the left context slice added before upscaling.

    The upscaler works at 4x, so ``original_image_slice`` input pixels become
    ``original_image_slice * 4`` output pixels to remove.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError); the upstream names are restored.
    """
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def _lowercase(n, d):
    """Round ``n`` down to the nearest multiple of ``d``.

    NOTE(review): the obfuscated original bound the remainder to a throwaway
    local and then returned the undefined name ``divisor``; restored here.
    """
    divisor = n % d
    return n - divisor
class A ( a ):
    """Tile-by-tile wrapper around the Stable Diffusion x4 upscale pipeline.

    NOTE(review): the base class ``a`` is unresolved (upstream this derives
    from ``StableDiffusionUpscalePipeline``), and obfuscation collapsed every
    local to ``_a`` and every parameter to ``snake_case_`` — references such
    as ``image``, ``tile``, ``crop_rect``, ``upscaled_tile``, ``final_image``,
    ``tile_size``, ``callback`` … are unresolved as written.  Restore the
    upstream names before running.
    """

    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 3_5_0 , ) -> int:
        # Forwards the VAE/text-encoder/tokenizer/UNet/schedulers to the base
        # upscale pipeline; the final default is the max noise level.
        super().__init__(
            vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , max_noise_level=snake_case_ , )

    # Upstream: ``_process_tile`` — crop one tile (plus overlap and left
    # context), upscale it 4x, and alpha-blend it into the output canvas.
    def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> List[Any]:
        torch.manual_seed(0 )
        # Tile rectangle in source-image coordinates, clamped to the image.
        _a = (
            min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
            min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
            min(image.size[0] , (x + 1) * tile_size ),
            min(image.size[1] , (y + 1) * tile_size ),
        )
        _a = add_overlap_rect(snake_case_ , snake_case_ , image.size )
        _a = image.crop(snake_case_ )
        # Horizontal position of this tile's centre, used to pick the context slice.
        _a = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        _a = translated_slice_x - (original_image_slice / 2)
        _a = max(0 , snake_case_ )
        _a = squeeze_tile(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        _a = to_input.size
        _a = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
        _a = super(snake_case_ , self ).__call__(image=snake_case_ , **snake_case_ ).images[0]
        _a = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
        _a = unsqueeze_tile(snake_case_ , snake_case_ )
        _a = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
        # Tiles on the image boundary keep those borders fully opaque.
        _a = []
        if x == 0:
            remove_borders.append("l" )
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r" )
        if y == 0:
            remove_borders.append("t" )
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b" )
        _a = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=snake_case_ ) , mode="L" , )
        final_image.paste(
            snake_case_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , snake_case_ )

    # Upstream: ``__call__`` — iterate over the tile grid and assemble the
    # 4x-upscaled output image, reporting progress via ``callback``.
    @torch.no_grad()
    def __call__( self , snake_case_ , snake_case_ , snake_case_ = 7_5 , snake_case_ = 9.0 , snake_case_ = 5_0 , snake_case_ = None , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = 1 , snake_case_ = 1_2_8 , snake_case_ = 3_2 , snake_case_ = 3_2 , ) -> List[str]:
        _a = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) )
        _a = math.ceil(image.size[0] / tile_size )
        _a = math.ceil(image.size[1] / tile_size )
        _a = tcx * tcy
        _a = 0
        for y in range(snake_case_ ):
            for x in range(snake_case_ ):
                self._process_tile(
                    snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , prompt=snake_case_ , num_inference_steps=snake_case_ , guidance_scale=snake_case_ , noise_level=snake_case_ , negative_prompt=snake_case_ , num_images_per_prompt=snake_case_ , eta=snake_case_ , generator=snake_case_ , latents=snake_case_ , )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image} )
        return final_image
def _lowercase ( ):
    """Demo driver: tiled-upscale the diffusers library logo on CUDA.

    NOTE(review): ``StableDiffusionTiledUpscalePipeline`` is not defined under
    that name in this obfuscated module (the class above is named ``A``);
    locals are collapsed to ``_a`` so ``pipe`` and ``final_image`` are
    unresolved, the ``from_pretrained`` first argument references an undefined
    name, and ``torch.floataa`` looks like a garbling of ``torch.float16``.
    """
    # Run a demo
    _a = "stabilityai/stable-diffusion-x4-upscaler"
    _a = StableDiffusionTiledUpscalePipeline.from_pretrained(lowerCamelCase__, revision="fp16", torch_dtype=torch.floataa )
    _a = pipe.to("cuda" )
    _a = Image.open("../../docs/source/imgs/diffusers_library.jpg" )
    def callback(lowerCamelCase__ : Dict ):
        # NOTE(review): the body reads ``obj`` but the parameter is mangled.
        print(F'''progress: {obj['progress']:.4f}''' )
        obj["image"].save("diffusers_library_progress.jpg" )
    _a = pipe(image=lowerCamelCase__, prompt="Black font, white background, vector", noise_level=40, callback=lowerCamelCase__ )
    final_image.save("diffusers_library.jpg" )


if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined under that name in this module.
    main()
| 131 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
__magic_name__ = list[list[float | int]]
def lowerCamelCase(matrix: list, vector: list) -> list:
    """Solve the square linear system ``matrix @ x == vector`` by Gaussian
    elimination with partial pivoting.

    Args:
        matrix: n x n coefficient rows (lists of numbers).
        vector: n x 1 column vector (list of single-element lists).

    Returns:
        n x 1 solution column, each entry rounded to 10 decimal places.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError) and dropped both the pivot row swap and the elimination
    ratio into throwaway locals; this restores the canonical algorithm.
    """
    size = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: bring the largest-magnitude entry of this column up.
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        # Eliminate the column below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # Back substitution: clear the entries above each pivot.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # Round to get rid of numbers like 2.000000000000004.
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def lowerCamelCase(y_points: list):
    """Fit a polynomial through ``(1, y_points[0]), (2, y_points[1]), …`` and
    return it as a callable ``int -> int``.

    NOTE(review): the obfuscated original declared a parameter shadowing the
    function name and wrote its loop results into throwaway locals instead of
    the Vandermonde matrix/vector; this restores the upstream logic.  The
    ``solve`` helper it calls is the Gaussian-elimination solver defined
    earlier in this file — unresolved under that name in the obfuscated
    module; confirm after de-obfuscation.
    """
    size = len(y_points)
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]
    # Vandermonde system: row i encodes powers of (i + 1).
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        # Evaluate the fitted polynomial at ``var`` with integer coefficients.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def lowerCamelCase(variable: int) -> int:
    """The Project Euler 101 generating polynomial
    ``u(n) = 1 - n + n^2 - n^3 + … + n^10``.

    NOTE(review): the obfuscated original's parameter name was mangled while
    the body read ``variable`` (a NameError); the upstream name is restored.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def lowerCamelCase(func=None, order: int = 10) -> int:
    """Project Euler 101: sum the first incorrect term (FIT) of each
    optimum polynomial fitted to prefixes of ``func``'s sequence.

    Args:
        func: generating function; defaults to the question polynomial
            (``question_function`` upstream — resolved lazily here so the
            module stays importable despite the obfuscated naming).
        order: degree+1 of the generating polynomial.

    NOTE(review): ``interpolate`` / ``question_function`` are not defined
    under those names in this obfuscated module; confirm after
    de-obfuscation.
    """
    if func is None:
        # Lazy lookup keeps the def statement import-safe.
        func = question_function
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        # Walk forward until the fitted polynomial first disagrees with func.
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined under that name in this
    # obfuscated module (the function above is named ``lowerCamelCase``).
    print(f"""{solution() = }""")
| 706 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)

# NOTE(review): both assignments here bind the same obfuscated name
# ``__magic_name__`` — the logger is immediately clobbered by the
# pretrained-config URL map below.
__magic_name__ = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Configuration class for DeBERTa-v2 (``model_type = "deberta-v2"``).

    NOTE(review): this class cannot be created as written — the base class
    name ``__SCREAMING_SNAKE_CASE`` is undefined (upstream it derives from
    ``PretrainedConfig``), and ``__init__`` declares every parameter as the
    same name ``_a`` (a SyntaxError) while the body reads the upstream
    parameter names (``hidden_size``, ``vocab_size`` …).  Restore the
    upstream signature before using.
    """

    a_ = """deberta-v2"""

    def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
        '''simple docstring'''
        super().__init__(**_a )
        A_ : Union[str, Any] = hidden_size
        A_ : Dict = num_hidden_layers
        A_ : Union[str, Any] = num_attention_heads
        A_ : List[Any] = intermediate_size
        A_ : List[Any] = hidden_act
        A_ : Optional[int] = hidden_dropout_prob
        A_ : Dict = attention_probs_dropout_prob
        A_ : int = max_position_embeddings
        A_ : Any = type_vocab_size
        A_ : List[Any] = initializer_range
        A_ : int = relative_attention
        A_ : Tuple = max_relative_positions
        A_ : int = pad_token_id
        A_ : Tuple = position_biased_input
        # Backwards compatibility
        # NOTE(review): prefer ``isinstance(pos_att_type, str)`` over a
        # ``type(...) ==`` comparison here.
        if type(_a ) == str:
            A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
        A_ : Any = pos_att_type
        A_ : Optional[int] = vocab_size
        A_ : Tuple = layer_norm_eps
        A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
        A_ : Union[str, Any] = pooler_dropout
        A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """ONNX export configuration for DeBERTa-v2.

    NOTE(review): the base class name ``__SCREAMING_SNAKE_CASE`` is undefined
    (upstream: ``OnnxConfig``), this class name shadows the config class
    defined just above, all three methods share the mangled name ``_a``
    (later defs shadow earlier ones) and also use ``_a`` for parameters, and
    locals are collapsed to ``A_`` so ``dynamic_axis`` / ``dummy_inputs`` are
    unresolved as written.
    """

    # Upstream: ``inputs`` property — dynamic-axis spec per model input.
    @property
    def _a ( self : Any ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            A_ : Any = {0: """batch""", 1: """sequence"""}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )

    # Upstream: ``default_onnx_opset`` property.
    @property
    def _a ( self : Optional[int] ):
        '''simple docstring'''
        return 12

    # Upstream: ``generate_dummy_inputs`` — drops token_type_ids when the
    # config declares no token-type vocabulary.
    def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
        '''simple docstring'''
        A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 27 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Fast, CPU-sized smoke tests for the unconditional ``LDMPipeline``.

    NOTE(review): every method below is named ``lowerCamelCase``, so each
    definition shadows the previous one, and the obfuscation replaced the
    names the bodies actually use (``model``, ``ldm``, ``image``,
    ``image_from_tuple``, ``expected_slice``, ``tolerance``,
    ``lowerCAmelCase_``) with throwaway ``A_`` bindings. As written the
    class cannot run — the upstream fixture/test names
    (``dummy_uncond_unet``, ``dummy_vq_model``, ``dummy_text_encoder`` and
    the inference test method) need to be restored.
    """

    @property
    def lowerCamelCase(self ):
        """Tiny seeded UNet2D fixture (presumably ``dummy_uncond_unet`` upstream)."""
        torch.manual_seed(0 )
        A_ : Any = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model

    @property
    def lowerCamelCase(self ):
        """Tiny seeded VQ-VAE fixture (presumably ``dummy_vq_model`` upstream)."""
        torch.manual_seed(0 )
        A_ : Optional[int] = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
        return model

    @property
    def lowerCamelCase(self ):
        """Tiny seeded CLIP text encoder fixture (presumably ``dummy_text_encoder``)."""
        torch.manual_seed(0 )
        A_ : Dict = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(lowerCAmelCase_ )

    def lowerCamelCase(self ):
        """Two-step sampling; checks output shape, a pixel slice, and the
        ``return_dict=False`` code path against the same reference slice."""
        A_ : List[str] = self.dummy_uncond_unet
        A_ : Optional[Any] = DDIMScheduler()
        A_ : List[str] = self.dummy_vq_model
        A_ : Dict = LDMPipeline(unet=lowerCAmelCase_ , vqvae=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
        ldm.to(lowerCAmelCase_ )
        ldm.set_progress_bar_config(disable=lowerCAmelCase_ )
        A_ : Optional[int] = torch.manual_seed(0 )
        A_ : Any = ldm(generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""numpy""" ).images
        A_ : List[str] = torch.manual_seed(0 )
        A_ : Optional[int] = ldm(generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=lowerCAmelCase_ )[0]
        A_ : Union[str, Any] = image[0, -3:, -3:, -1]
        A_ : Any = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        A_ : Dict = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        # mps accumulates more numerical error, hence the looser tolerance.
        A_ : Tuple = 1e-2 if torch_device != """mps""" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test: runs the pretrained CompVis/ldm-celebahq-256
    pipeline for five steps and checks a hard-coded pixel slice.

    NOTE(review): the locals the body reads (``ldm``, ``image``,
    ``image_slice``, ``expected_slice``, ``tolerance``) were renamed to
    ``A_`` at their definition sites, and ``lowerCAmelCase_`` (presumably
    ``torch_device`` / keyword values upstream) is undefined — this method
    cannot run as written.
    """

    def lowerCamelCase(self ):
        """End-to-end sampling check against reference pixel values."""
        A_ : Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
        ldm.to(lowerCAmelCase_ )
        ldm.set_progress_bar_config(disable=lowerCAmelCase_ )
        A_ : Dict = torch.manual_seed(0 )
        A_ : List[str] = ldm(generator=lowerCAmelCase_ , num_inference_steps=5 , output_type="""numpy""" ).images
        A_ : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        A_ : Tuple = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
        # mps accumulates more numerical error, hence the looser tolerance.
        A_ : str = 1e-2 if torch_device != """mps""" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 180 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    """T5-style encoder over note tokens: token + learned position embeddings,
    a stack of ``T5Block``s, then a final layer norm and dropout.

    NOTE(review): the ``@register_to_config`` ``__init__`` repeats the
    parameter name ``lowerCAmelCase_`` (a SyntaxError), and the obfuscation
    turned the sub-module assignments (upstream ``self.token_embedder``,
    ``self.position_encoding``, ``self.dropout_pre``, ``self.encoders``,
    ``self.layer_norm``, ``self.dropout_post``) into throwaway ``A_``
    locals, so nothing the forward pass reads is actually stored. Restore
    the upstream parameter/attribute names before use.
    """

    @register_to_config
    def __init__(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False , ):
        """Build the embeddings, the T5 encoder stack and output norm/dropout."""
        super().__init__()
        A_ : Tuple = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
        A_ : List[str] = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
        # Presumably freezes the position-embedding weights upstream — TODO confirm.
        A_ : Any = False
        A_ : Tuple = nn.Dropout(p=lowerCAmelCase_ )
        A_ : List[str] = TaConfig(
            vocab_size=lowerCAmelCase_ , d_model=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , feed_forward_proj=lowerCAmelCase_ , is_decoder=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , )
        A_ : Optional[Any] = nn.ModuleList()
        for lyr_num in range(lowerCAmelCase_ ):
            A_ : Tuple = TaBlock(lowerCAmelCase_ )
            self.encoders.append(lowerCAmelCase_ )
        A_ : Any = TaLayerNorm(lowerCAmelCase_ )
        A_ : Union[str, Any] = nn.Dropout(p=lowerCAmelCase_ )

    def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ ):
        """Encode ``encoder_input_tokens``: embed, add positions, run the
        encoder stack; returns (encodings, attention mask)."""
        A_ : List[Any] = self.token_embedder(lowerCAmelCase_ )
        A_ : Optional[Any] = encoder_input_tokens.shape[1]
        A_ : Any = torch.arange(lowerCAmelCase_ , device=encoder_input_tokens.device )
        x += self.position_encoding(lowerCAmelCase_ )
        A_ : Optional[Any] = self.dropout_pre(lowerCAmelCase_ )
        # inverted the attention mask
        A_ : int = encoder_input_tokens.size()
        A_ : Optional[Any] = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ )
        for lyr in self.encoders:
            A_ : int = lyr(lowerCAmelCase_ , lowerCAmelCase_ )[0]
        A_ : List[str] = self.layer_norm(lowerCAmelCase_ )
        return self.dropout_post(lowerCAmelCase_ ), encoder_inputs_mask
| 180 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : List[Any] = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
    """Slow/fast DeBERTa-v2 SentencePiece tokenizer tests.

    NOTE(review): nearly every method is named ``SCREAMING_SNAKE_CASE`` so
    later definitions shadow earlier ones, and locals the bodies read
    (``tokenizer``, ``rust_tokenizer``, ``vocab_keys``, ``text``, ``tokens``,
    ``ids``, ...) were renamed to ``__A`` at their definition sites; the
    four class attributes below likewise all share the name
    ``lowerCAmelCase``. The upstream ``test_*`` method, attribute and local
    names must be restored before this suite can run.
    """

    lowerCAmelCase = DebertaVaTokenizer
    lowerCAmelCase = DebertaVaTokenizerFast
    lowerCAmelCase = True
    lowerCAmelCase = True

    def SCREAMING_SNAKE_CASE ( self):
        """Create the fixture tokenizer and save it into ``tmpdirname``."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        __A : Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)

    def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
        """Return an (input_text, output_text) pair for the common test harness."""
        __A : Union[str, Any] = 'this is a test'
        __A : Dict = 'this is a test'
        return input_text, output_text

    def SCREAMING_SNAKE_CASE ( self):
        """Round-trip a single token through token<->id conversion."""
        __A : Union[str, Any] = '<pad>'
        __A : Optional[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase) , _UpperCAmelCase)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase) , _UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Spot-check the first/last vocab entries and total vocab length."""
        __A : Any = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<pad>')
        self.assertEqual(vocab_keys[1] , '<unk>')
        self.assertEqual(vocab_keys[-1] , '[PAD]')
        self.assertEqual(len(_UpperCAmelCase) , 3_0001)

    def SCREAMING_SNAKE_CASE ( self):
        """The raw SentencePiece vocab has 30000 entries."""
        self.assertEqual(self.get_tokenizer().vocab_size , 3_0000)

    def SCREAMING_SNAKE_CASE ( self):
        """Lower-casing behaviour matches between slow and fast tokenizers."""
        # fmt: off
        __A : Optional[Any] = ' \tHeLLo!how \n Are yoU? '
        __A : Optional[int] = ['▁hello', '!', 'how', '▁are', '▁you', '?']
        # fmt: on
        __A : Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase)
        __A : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : List[str] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase)
        __A : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)

    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
    def SCREAMING_SNAKE_CASE ( self):
        """Skipped: slow/fast tokenizers disagree due to an upstream fast-tokenizer bug."""
        pass

    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
    def SCREAMING_SNAKE_CASE ( self):
        """Skipped: slow/fast tokenizers disagree due to an upstream fast-tokenizer bug."""
        pass

    def SCREAMING_SNAKE_CASE ( self):
        """``split_by_punct=True`` splits punctuation into separate pieces."""
        # fmt: off
        __A : int = 'I was born in 92000, and this is falsé.'
        __A : str = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        __A : List[Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase)
        __A : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : Any = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase)
        __A : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Lower-casing combined with punctuation splitting."""
        # fmt: off
        __A : Tuple = 'I was born in 92000, and this is falsé.'
        __A : Optional[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        __A : List[Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase)
        __A : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase)
        __A : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Lower-casing with punctuation splitting disabled."""
        # fmt: off
        __A : str = 'I was born in 92000, and this is falsé.'
        __A : List[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        __A : int = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase)
        __A : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : List[str] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase)
        __A : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Punctuation splitting without lower-casing."""
        # fmt: off
        __A : int = 'I was born in 92000, and this is falsé.'
        __A : Optional[Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        __A : Tuple = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase)
        __A : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase)
        __A : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Mixed-case/whitespace input with punctuation splitting."""
        # fmt: off
        __A : str = ' \tHeLLo!how \n Are yoU? '
        __A : Optional[int] = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
        # fmt: on
        __A : Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase)
        __A : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase)
        __A : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Slow and fast tokenizers agree on tokens and on encoded ids."""
        __A : Dict = self.get_tokenizer()
        __A : List[str] = self.get_rust_tokenizer()
        __A : Dict = 'I was born in 92000, and this is falsé.'
        __A : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        __A : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase))
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : List[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
        __A : Optional[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : int = self.get_rust_tokenizer()
        __A : List[str] = tokenizer.encode(_UpperCAmelCase)
        __A : int = rust_tokenizer.encode(_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Full tokenize/encode/convert round-trips for slow and fast tokenizers."""
        __A : Tuple = 'This is a test'
        __A : int = [13, 1, 4398, 25, 21, 1289]
        __A : Dict = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
        __A : Tuple = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
        __A : Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase)
        __A : Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase)
        __A : str = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : Tuple = tokenizer.tokenize(_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : List[str] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : Any = rust_tokenizer.tokenize(_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : int = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        # fmt: off
        __A : Union[str, Any] = 'I was born in 92000, and this is falsé.'
        __A : Union[str, Any] = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        __A : int = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
        __A : List[Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        __A : List[str] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : str = tokenizer.tokenize(_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : Union[str, Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : int = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
        __A : List[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase)
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """``build_inputs_with_special_tokens`` adds CLS/SEP around one or two sequences."""
        __A : str = DebertaVaTokenizer(_UpperCAmelCase)
        __A : List[str] = tokenizer.encode('sequence builders')
        __A : Optional[Any] = tokenizer.encode('multi-sequence build')
        __A : Any = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase)
        __A : Tuple = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase)
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , )

    @slow
    def SCREAMING_SNAKE_CASE ( self):
        """Frozen integration check against the hosted microsoft/deberta-v2-xlarge tokenizer."""
        __A : int = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
'''simple docstring'''
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of the segment joining points ``pa`` and ``pb``.

    Fixes over the previous revision: the signature repeated the parameter
    name ``__snake_case`` (a SyntaxError) and the body averaged ``pa`` with
    itself instead of with the second point; the caller below invokes
    ``get_mid``, so that name is restored.
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(
    vertexa: tuple[float, float],
    vertexb: tuple[float, float],
    vertexc: tuple[float, float],
    depth: int,
) -> None:
    """Draw one triangle, then recurse into the three corner sub-triangles
    (Sierpinski construction) until ``depth`` reaches 0.

    Relies on the module-level turtle pen ``my_pen`` created in the
    ``__main__`` block below.

    Fixes over the previous revision: the signature repeated the parameter
    name ``__snake_case`` (a SyntaxError) and the body referenced undefined
    names; the recursion and drawing now use the restored vertex names, and
    a local midpoint helper keeps this function self-contained.
    """

    def _mid(p: tuple[float, float], q: tuple[float, float]) -> tuple[float, float]:
        # Midpoint of the segment p-q.
        return (p[0] + q[0]) / 2, (p[1] + q[1]) / 2

    # Trace the triangle outline: lift the pen to the first vertex, then draw.
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    # Recurse on the three corner triangles formed by the edge midpoints.
    triangle(vertexa, _mid(vertexa, vertexb), _mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, _mid(vertexb, vertexa), _mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, _mid(vertexc, vertexa), _mid(vertexc, vertexb), depth - 1)
if __name__ == "__main__":
    # Usage: python fractals.py <depth>; depth controls recursion levels.
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    # Previous revision bound the pen and vertex list to mangled names
    # (`lowercase__`) while the code below read `my_pen` and `vertices`.
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
"""simple docstring"""
from __future__ import annotations
# A Sudoku board is a 9x9 grid of ints; 0 marks an empty cell.
# (Previous revision bound all three values below to the same mangled name
# `__magic_name__`, so the `Matrix` alias and both grids were unreachable.)
Matrix = list[list[int]]

# assigning initial values to the grid (a solvable example)
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution (conflicting givens)
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: list[list[int]], row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` can be placed at (row, column) without
    clashing with the same row, the same column, or the 3x3 box.

    Fixes over the previous revision: the signature repeated the parameter
    name ``SCREAMING_SNAKE_CASE`` (a SyntaxError) and the function name was
    mangled although the solver below calls ``is_safe``.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    # Check the 3x3 box containing (row, column).
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: list[list[int]]) -> list[list[int]] | None:
    """Solve ``grid`` in place by backtracking.

    Returns the solved grid, or None if no solution exists.

    Fixes over the previous revision: the function/parameter names were
    mangled (and the recursion called names that no longer existed); the
    restored names match the ``is_safe`` / ``find_empty_location`` helpers
    above and the ``__main__`` block below.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Dead end: undo the placement and try the next digit.
            grid[row][column] = 0
    return None
def print_solution(grid: list[list[int]]) -> None:
    """Pretty-print the grid, one row per line with space-separated cells.

    Fixes over the previous revision: the function/parameter names were
    mangled although the ``__main__`` block below calls ``print_solution``.
    """
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    # (Previous revision bound the solver result to a mangled name while
    # reading `solution` below.)
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 2_0)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 102 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class A_ (a_ ):
    """Output container for the transformation model defined below.

    NOTE(review): all four class attributes share the single name
    ``UpperCAmelCase__`` and carry no type annotations, so ``@dataclass``
    records no fields at all — the keyword constructor calls later in this
    file (``projection_state=...``, ``last_hidden_state=...``,
    ``hidden_states=...``, ``attentions=...``) cannot work. Restore the
    upstream annotated field names before use.
    """
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None
class A_ (a_ ):
    """Configuration for the Roberta-series text encoder: adds the projection
    dimension, pooler selection and encoder-training flags on top of the
    base (XLM-Roberta style) configuration.

    Fixes over the previous revision: ``__init__`` repeated the parameter
    name ``_A`` (a SyntaxError) while the body read the real option names —
    the parameter names below are reconstructed from those body references
    and the ``super().__init__`` keyword call, keeping the original
    positional defaults.
    """

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=5_1_2,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class A_ (a_ ):
    """XLM-Roberta encoder with a linear projection head (and an optional
    pre-transformation branch applied to the second-to-last hidden state).

    NOTE(review): ``__init__``'s body reads ``config`` although its
    parameter is named ``_A``, the sub-module assignments were mangled to
    bare ``UpperCAmelCase`` names (presumably ``self.base_model``/
    ``self.roberta``, ``self.transformation``, ``self.has_pre_transformation``,
    ``self.transformation_pre``, ``self.pre_LN`` upstream — confirm against
    the original), and the forward method repeats the parameter name ``_A``
    (a SyntaxError). Restore the upstream names before this can run.
    """

    UpperCAmelCase__ = [r'''pooler''', r'''logit_scale''']
    UpperCAmelCase__ = [r'''position_ids''', r'''predictions.decoder.bias''']
    UpperCAmelCase__ = '''roberta'''
    UpperCAmelCase__ = RobertaSeriesConfig

    def __init__( self , _A ):
        """Build the base XLM-Roberta model and the projection layer(s)."""
        super().__init__(_A )
        UpperCAmelCase = XLMRobertaModel(_A )
        UpperCAmelCase = nn.Linear(config.hidden_size , config.project_dim )
        UpperCAmelCase = getattr(_A , '''has_pre_transformation''' , _A )
        if self.has_pre_transformation:
            UpperCAmelCase = nn.Linear(config.hidden_size , config.project_dim )
            UpperCAmelCase = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()

    def _lowercase ( self , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , ):
        """Run the base model and project either the pre-final hidden state
        (when the pre-transformation branch is enabled) or the last one."""
        UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
        UpperCAmelCase = self.base_model(
            input_ids=_A , attention_mask=_A , token_type_ids=_A , position_ids=_A , head_mask=_A , inputs_embeds=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_attentions=_A , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=_A , )
        if self.has_pre_transformation:
            # Use the second-to-last hidden state, normalised, then projected.
            UpperCAmelCase = outputs['''hidden_states'''][-2]
            UpperCAmelCase = self.pre_LN(_A )
            UpperCAmelCase = self.transformation_pre(_A )
            return TransformationModelOutput(
                projection_state=_A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            UpperCAmelCase = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=_A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 130 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both constants below are bound to the same name ``_A`` — the
# config-map assignment shadows the logger (upstream these are ``logger`` and
# the LeViT pretrained-config archive map).
_A = logging.get_logger(__name__)
_A = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowerCamelCase ( A_ ):
    """Configuration for a LeViT model (hybrid convolution/attention vision
    backbone); all hyper-parameters are stored as attributes.

    Fixes over the previous revision: ``__init__`` repeated the parameter
    name ``_A`` (a SyntaxError) while the body read the real
    hyper-parameter names, and every assignment targeted a throwaway
    ``snake_case`` local so nothing was stored on ``self``. Parameter names
    are reconstructed from the body references, keeping the original
    positional defaults.
    """

    UpperCAmelCase__ : Dict = "levit"

    # NOTE: the list defaults mirror the original signature; they are never
    # mutated, so sharing them across calls is safe.
    def __init__(
        self,
        image_size=2_2_4,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=1_6,
        hidden_sizes=[1_2_8, 2_5_6, 3_8_4],
        num_attention_heads=[4, 8, 1_2],
        depths=[4, 4, 4],
        key_dim=[1_6, 1_6, 1_6],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Down-sampling ("Subsample") stage specs between the three blocks.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class lowerCamelCase ( A_ ):
    """ONNX export settings for LeViT.

    NOTE(review): both properties below are named ``UpperCAmelCase``, so
    the second (the validation tolerance, upstream ``atol_for_validation``)
    shadows the first (the input axes, upstream ``inputs``) — restore the
    upstream names before use.
    """

    # Minimum torch version supported by the ONNX export path.
    UpperCAmelCase__ : Dict = version.parse("1.11" )

    @property
    def UpperCAmelCase(self : List[str] ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout of the exported ``pixel_values`` input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def UpperCAmelCase(self : List[str] ) -> float:
        """Absolute tolerance used when validating exported model outputs."""
        return 1E-4
| 706 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): every constant below is bound to the same name ``_A`` — each
# assignment shadows the previous one (upstream: SPIECE_UNDERLINE,
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, logger), so only the logger
# survives at module level and the names the tokenizer class reads are
# undefined. Restore the upstream constant names before use.
_A = "▁"
_A = {"vocab_file": "spiece.model"}
_A = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
_A = {
    "google/pegasus-xsum": 5_12,
}
_A = logging.get_logger(__name__)
class lowerCamelCase ( A_ ):
    """SentencePiece tokenizer for Pegasus: prefixes the SentencePiece vocab
    with pad/eos/mask special tokens plus ``<unk_2>..<unk_102>`` pretraining
    slots, shifting all SentencePiece ids by ``offset``.

    NOTE(review): the first class attribute is duplicated (both bound to
    ``VOCAB_FILES_NAMES``; upstream one is ``vocab_files_names``), most
    methods share the name ``UpperCAmelCase`` so later definitions shadow
    earlier ones, several signatures repeat the parameter name ``_A`` (a
    SyntaxError), and locals the bodies read (``offset``, ``vocab``,
    ``state``, ``d``, ``sp_id``, ``token``, ``tokens``, ``out_string``, ...)
    were renamed to ``snake_case`` at their definition sites. Restore the
    upstream method/parameter/local names before use.
    """

    UpperCAmelCase__ : Tuple = VOCAB_FILES_NAMES
    UpperCAmelCase__ : Tuple = VOCAB_FILES_NAMES
    UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ : Optional[Any] = ["input_ids", "attention_mask"]

    def __init__(self : Optional[Any] , _A : Any , _A : List[Any]="<pad>" , _A : int="</s>" , _A : Dict="<unk>" , _A : str="<mask_2>" , _A : Optional[int]="<mask_1>" , _A : Optional[Any]=None , _A : Tuple=1_0_3 , _A : Optional[Dict[str, Any]] = None , **_A : List[str] , ) -> None:
        """Load the SentencePiece model and wire up the special-token tables."""
        snake_case = offset
        if additional_special_tokens is not None:
            if not isinstance(_A , _A ):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(_A )}, but is'
                    f' {type(_A )}' )
            snake_case = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(_A ) , self.offset - 1 )
            ]
            if len(set(_A ) ) != len(_A ):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
            snake_case = additional_special_tokens_extended
        else:
            snake_case = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
        snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=_A , unk_token=_A , mask_token=_A , pad_token=_A , mask_token_sent=_A , offset=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
        snake_case = mask_token_sent
        snake_case = vocab_file
        snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_A )
        # add special tokens to encoder dict
        snake_case = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                } )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        snake_case = {v: k for k, v in self.encoder.items()}

    @property
    def UpperCAmelCase(self : str ) -> int:
        """Total vocabulary size: SentencePiece pieces plus the id offset."""
        return len(self.sp_model ) + self.offset

    def UpperCAmelCase(self : List[str] ) -> Dict[str, int]:
        """Return the token -> id map, including user-added tokens."""
        snake_case = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self : Any ) -> List[Any]:
        """Drop the unpicklable SentencePiece processor before pickling."""
        snake_case = self.__dict__.copy()
        snake_case = None
        return state

    def __setstate__(self : str , _A : Union[str, Any] ) -> Tuple:
        """Restore pickled state and reload the SentencePiece model from disk."""
        snake_case = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            snake_case = {}
        snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def UpperCAmelCase(self : List[Any] , _A : str ) -> List[str]:
        """Tokenize text into SentencePiece string pieces."""
        return self.sp_model.encode(_A , out_type=_A )

    def UpperCAmelCase(self : List[str] , _A : str ) -> int:
        """Convert a token to its id, honouring the special-token offset."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        snake_case = self.sp_model.piece_to_id(_A )
        return sp_id + self.offset

    def UpperCAmelCase(self : Union[str, Any] , _A : int ) -> str:
        """Convert an id back to its token, honouring the offset."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            snake_case = self.sp_model.IdToPiece(index - self.offset )
            return token

    def UpperCAmelCase(self : List[Any] , _A : Tuple ) -> Tuple:
        """Join tokens back into a string, decoding around special tokens."""
        snake_case = []
        snake_case = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(_A ) + token
                snake_case = []
            else:
                current_sub_tokens.append(_A )
        out_string += self.sp_model.decode(_A )
        return out_string.strip()

    def UpperCAmelCase(self : List[Any] , _A : Tuple=False ) -> Tuple:
        """Number of special tokens appended to a sequence (just EOS)."""
        return 1

    def UpperCAmelCase(self : Tuple , _A : Optional[int] ) -> Tuple:
        """Mark special-token positions in a sequence with 1s (unk excluded)."""
        snake_case = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def UpperCAmelCase(self : str , _A : List , _A : Optional[List] = None , _A : bool = False ) -> List[int]:
        """Special-tokens mask for one or two sequences."""
        if already_has_special_tokens:
            return self._special_token_mask(_A )
        elif token_ids_a is None:
            return self._special_token_mask(_A ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a ) + [1]

    def UpperCAmelCase(self : int , _A : Dict , _A : List[Any]=None ) -> List[int]:
        """Append EOS to build model inputs from one or two sequences."""
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]

    def UpperCAmelCase(self : Optional[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(_A ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        snake_case = os.path.join(
            _A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _A )
        elif not os.path.isfile(self.vocab_file ):
            with open(_A , "wb" ) as fi:
                snake_case = self.sp_model.serialized_model_proto()
                fi.write(_A )
        return (out_vocab_file,)
| 294 | 0 |
'''Deprecated module shim: re-exports `FlaxStableDiffusionControlNetPipeline` from its new
location and emits a deprecation warning at import time.'''
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401
# Warn (once, at import) that this import path is scheduled for removal in 0.22.0.
deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
| 208 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class _UpperCAmelCase ( unittest.TestCase ):
    """Smoke tests for the `accelerate launch` and `accelerate test` CLI commands.

    NOTE(review): every class attribute below is named `__lowerCamelCase`, yet the
    body references `mod_file`, `base_cmd`, `config_folder`, `config_file`,
    `config_path`, `changed_path`, `test_file_path` and `test_config_path` — the
    original attribute names appear to have been mangled away; confirm against the
    upstream accelerate test suite before running.
    """
    # Path of the installed accelerate.test_utils module; used to locate bundled scripts.
    __lowerCamelCase: List[Any] = inspect.getfile(accelerate.test_utils )
    __lowerCamelCase: List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
    __lowerCamelCase: Optional[Any] = ['accelerate', 'launch']
    __lowerCamelCase: List[str] = Path.home() / '.cache/huggingface/accelerate'
    __lowerCamelCase: Dict = 'default_config.yaml'
    __lowerCamelCase: Union[str, Any] = config_folder / config_file
    __lowerCamelCase: Tuple = config_folder / '_default_config.yaml'
    __lowerCamelCase: Union[str, Any] = Path('tests/test_configs' )
    @classmethod
    def lowerCAmelCase__ ( cls : List[Any] ):
        '''Stash any pre-existing user config so the tests run against a clean state.'''
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )
    @classmethod
    def lowerCAmelCase__ ( cls : str ):
        '''Restore the user's original config after the test class finishes.'''
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )
    def lowerCAmelCase__ ( self : Dict ):
        '''Launch the bundled CLI test script; adds --multi_gpu when >1 CUDA device is visible.'''
        lowercase_ : List[str] = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]  # NOTE(review): `cmd` is never defined — presumably the variable assigned above
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Launch once per YAML config file under tests/test_configs.'''
        for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
            with self.subTest(config_file=a ):  # NOTE(review): `a` is undefined — presumably the loop variable `config`
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(a ), self.test_file_path] , env=os.environ.copy() )
    def lowerCAmelCase__ ( self : List[str] ):
        '''Run the built-in `accelerate test` sanity check in a subprocess.'''
        execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class _UpperCAmelCase ( unittest.TestCase ):
    """Tests for the `accelerate tpu-config` CLI: each test runs the command with
    --debug (print instead of execute) and asserts on the assembled gcloud command.

    NOTE(review): the class attributes are all named `__lowerCamelCase`, yet the
    methods reference `self.cmd`, `self.command`, `self.tpu_zone`, `self.tpu_name`,
    `self.command_file`, `self.gcloud` and `self.base_output` — the original
    attribute names appear to have been mangled away; confirm against upstream.
    The undefined name `a` passed to `return_stdout=`/`assertIn` is likewise
    presumably a mangled local/constant.
    """
    __lowerCamelCase: Tuple = 'test-tpu'
    __lowerCamelCase: Dict = 'us-central1-a'
    __lowerCamelCase: List[str] = 'ls'
    __lowerCamelCase: Optional[int] = ['accelerate', 'tpu-config']
    __lowerCamelCase: Optional[Any] = 'cd /usr/share'
    __lowerCamelCase: Any = 'tests/test_samples/test_command_file.sh'
    __lowerCamelCase: Tuple = 'Running gcloud compute tpus tpu-vm ssh'
    def lowerCAmelCase__ ( self : Tuple ):
        '''All CLI args supplied on the command line, no config file.'''
        lowercase_ : str = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a , )
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''CLI args supplied alongside a legacy (0.12.0) config file.'''
        lowercase_ : Optional[int] = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=a , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a , )
    def lowerCAmelCase__ ( self : int ):
        '''Everything read from the latest config file; no command override.'''
        lowercase_ : Union[str, Any] = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a , )
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Config file plus a single --command override.'''
        lowercase_ : Any = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a , )
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Config file plus multiple --command overrides, run in order.'''
        lowercase_ : Optional[Any] = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ] , return_stdout=a , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a , )
    def lowerCAmelCase__ ( self : Any ):
        '''Config file plus a --command_file script whose lines become the commands.'''
        lowercase_ : Optional[Any] = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=a , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a , )
    def lowerCAmelCase__ ( self : Dict ):
        '''--command_file combined with a legacy (0.12.0) config file.'''
        lowercase_ : List[Any] = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=a , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a , )
    def lowerCAmelCase__ ( self : Dict ):
        '''--install_accelerate prepends a `pip install accelerate -U` step.'''
        lowercase_ : Union[str, Any] = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a , )
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''--accelerate_version pins the installed version instead of upgrading.'''
        lowercase_ : Optional[Any] = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ] , return_stdout=a , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a , )
| 620 | 0 |
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowercase__( unittest.TestCase ):
    '''Slow integration tests comparing FlaxUNet2DConditionModel outputs against
    reference activations stored as .npy files on the Hub.

    NOTE(review): several method signatures below repeat the parameter name
    `lowerCamelCase_`, which is a SyntaxError, and bodies reference names
    (`seed`, `shape`, `fpaa`, `model_id`, `__lowerCamelCase`) that no longer
    exist — the original parameter names appear to have been mangled away;
    restore from the upstream diffusers test before running.
    '''
    def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any] ) -> str:
        '''Build the .npy filename for a reference tensor from its seed and shape.'''
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(__lowerCamelCase ) for s in shape] )}.npy"
    def __lowerCAmelCase ( self :Tuple ) -> str:
        '''Free memory between tests.'''
        super().tearDown()
        gc.collect()
    def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :List[Any]=0 , lowerCamelCase_ :List[Any]=(4, 4, 64, 64) , lowerCamelCase_ :Dict=False ) -> List[str]:
        '''Load a reference latent tensor from the Hub (bfloat16 when fp16 is requested).'''
        SCREAMING_SNAKE_CASE : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
        SCREAMING_SNAKE_CASE : Any = jnp.array(load_hf_numpy(self.get_file_format(__lowerCamelCase , __lowerCamelCase ) ) , dtype=__lowerCamelCase )
        return image
    def __lowerCAmelCase ( self :str , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Union[str, Any]:
        '''Load the Flax UNet (and params) for the given checkpoint, optionally in bf16.'''
        SCREAMING_SNAKE_CASE : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
        SCREAMING_SNAKE_CASE : Optional[int] = "bf16" if fpaa else None
        SCREAMING_SNAKE_CASE : Optional[int] = FlaxUNetaDConditionModel.from_pretrained(
            __lowerCamelCase , subfolder='''unet''' , dtype=__lowerCamelCase , revision=__lowerCamelCase )
        return model, params
    def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Tuple=0 , lowerCamelCase_ :List[Any]=(4, 77, 7_68) , lowerCamelCase_ :Dict=False ) -> Union[str, Any]:
        '''Load reference text-encoder hidden states from the Hub.'''
        SCREAMING_SNAKE_CASE : str = jnp.bfloataa if fpaa else jnp.floataa
        SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(load_hf_numpy(self.get_file_format(__lowerCamelCase , __lowerCamelCase ) ) , dtype=__lowerCamelCase )
        return hidden_states
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
            [17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
            [8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
            [3, 10_00, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
            # fmt: on
        ] )
    def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
        '''Compare SD v1-4 UNet output slices against reference values (bf16 tolerance).'''
        SCREAMING_SNAKE_CASE : Tuple = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__lowerCamelCase )
        SCREAMING_SNAKE_CASE : int = self.get_latents(__lowerCamelCase , fpaa=__lowerCamelCase )
        SCREAMING_SNAKE_CASE : int = self.get_encoder_hidden_states(__lowerCamelCase , fpaa=__lowerCamelCase )
        SCREAMING_SNAKE_CASE : int = model.apply(
            {'''params''': params} , __lowerCamelCase , jnp.array(__lowerCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=__lowerCamelCase , ).sample
        assert sample.shape == latents.shape
        SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        SCREAMING_SNAKE_CASE : List[Any] = jnp.array(__lowerCamelCase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-2 )
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
            [17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
            [8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
            [3, 10_00, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
            # fmt: on
        ] )
    def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] ) -> List[Any]:
        '''Compare SD v2 UNet output slices against reference values (larger latents/context).'''
        SCREAMING_SNAKE_CASE : str = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__lowerCamelCase )
        SCREAMING_SNAKE_CASE : Dict = self.get_latents(__lowerCamelCase , shape=(4, 4, 96, 96) , fpaa=__lowerCamelCase )
        SCREAMING_SNAKE_CASE : str = self.get_encoder_hidden_states(__lowerCamelCase , shape=(4, 77, 10_24) , fpaa=__lowerCamelCase )
        SCREAMING_SNAKE_CASE : Union[str, Any] = model.apply(
            {'''params''': params} , __lowerCamelCase , jnp.array(__lowerCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=__lowerCamelCase , ).sample
        assert sample.shape == latents.shape
        SCREAMING_SNAKE_CASE : str = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(__lowerCamelCase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-2 )
| 705 |
"""simple docstring"""
def __A ( a_ : int )-> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
SCREAMING_SNAKE_CASE : Optional[int] = [True] * (num + 1)
SCREAMING_SNAKE_CASE : Optional[Any] = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , a_ ):
SCREAMING_SNAKE_CASE : Any = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the original called the undefined names `prime_sieve_eratosthenes`
    # and `user_num`; the sieve above is defined as `__A` and the parsed input
    # is bound to `lowerCamelCase__`.
    lowerCamelCase__ : int = int(input("Enter a positive integer: ").strip())
    print(__A(lowerCamelCase__))
| 18 | 0 |
'''simple docstring'''
def _a (lowercase__ : str , lowercase__ : int ) -> list[str]:
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(lowercase__ ) - ngram_size + 1 )]
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 56 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    """Recursively print the structure of a (possibly nested) checkpoint dict.

    Dicts are descended with increasing indentation; tensors print their size;
    anything else prints its value. Fixes: the original def repeated the
    parameter name `_a` (a SyntaxError), lost the `msg`/`fmt` bindings, and was
    named `UpperCamelCase` although call sites use `recursive_print`.

    Args:
        name: key of the current entry, or None for the root.
        val: dict, torch.Tensor, or scalar value.
        spaces: current indentation level.
    """
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a fused QKV weight/bias from Megatron's layout to [num_splits * num_heads * hidden_size, :].

    The input is contiguous in a checkpoint-version-dependent order; the output
    layout works with later versions of NVIDIA Megatron-LM v2.0 and matches what
    transformers' GPT-2 expects (after the caller's final transpose). Fixes: the
    original def repeated the parameter name `_a` five times (a SyntaxError),
    lost all assignment targets, and was named `UpperCamelCase` although the
    call sites use `fix_query_key_value_ordering`.

    Args:
        param: the fused QKV tensor; first dim is num_heads*num_splits*hidden_size.
        checkpoint_version: Megatron checkpoint format version (1.0, >=2.0, or other -> no-op).
        num_splits: number of fused projections (3 for Q, K, V).
        num_heads: attention head count.
        hidden_size: per-head hidden size.
    """
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a raw Megatron-LM GPT-2 state dict to the transformers GPT-2 layout.

    Fixes: the original def repeated the parameter name `_a` (a SyntaxError),
    and every `output_state_dict[...]` / `config.*` assignment target had been
    lost, so the function produced nothing; restore the canonical conversion.

    Args:
        args: parsed CLI namespace (unused; kept for call-site compatibility).
        input_state_dict: the loaded Megatron checkpoint dict.
        config: GPT-2 config; updated in place from the checkpoint's `args` when present.

    Returns:
        Dict mapping transformers GPT-2 parameter names to tensors.
    """
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings, truncated to vocab_size rows.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer ("encoder" in newer Megatron versions).
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG: every declared layer must have been seen.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    """CLI entry point: load a Megatron-LM GPT-2 checkpoint, convert it to the
    transformers format, and save config, tokenizer files and weights.

    Fixes: the original def was named `UpperCamelCase` while the
    `if __name__ == "__main__"` guard called the undefined `main()`, the
    signature/locals had lost their names, and several config keyword values
    had been replaced by a bogus placeholder; restore the canonical script.
    """
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPTaConfig(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPTaConfig.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)


####################################################################################################

if __name__ == "__main__":
    main()

####################################################################################################
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a__ ( unittest.TestCase ):
    """Offline/robustness tests for tokenizer loading from the Hugging Face Hub."""
    def snake_case__ ( self ):
        '''A cached slow tokenizer must still load when the Hub returns HTTP 500.'''
        lowercase__ = mock.Mock()
        lowercase__ = 500
        lowercase__ = {}
        lowercase__ = HTTPError
        lowercase__ = {}
        # Download this model to make sure it's in the cache.
        lowercase__ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=_UpperCAmelCase ) as mock_head:
            lowercase__ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def snake_case__ ( self ):
        '''A cached fast tokenizer must still load when the Hub returns HTTP 500.'''
        lowercase__ = mock.Mock()
        lowercase__ = 500
        lowercase__ = {}
        lowercase__ = HTTPError
        lowercase__ = {}
        # Download this model to make sure it's in the cache.
        lowercase__ = GPTaTokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=_UpperCAmelCase ) as mock_head:
            lowercase__ = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def snake_case__ ( self ):
        '''Legacy loading from a bare file/URL must not pick up same-named local files.

        NOTE(review): the locals are all named `lowercase__` and several references
        (`_UpperCAmelCase`, `tokenizer`) point at mangled names — confirm against
        the upstream transformers test before relying on this.
        '''
        try:
            lowercase__ = tempfile.mktemp()
            with open(_UpperCAmelCase, "wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", _UpperCAmelCase )
            lowercase__ = AlbertTokenizer.from_pretrained(_UpperCAmelCase )
        finally:
            os.remove(_UpperCAmelCase )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json" ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb" ) as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", _UpperCAmelCase )
            lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json" )
    def snake_case__ ( self ):
        '''Loading directly from a resolve URL (legacy path) must still work.'''
        lowercase__ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class a__ ( unittest.TestCase ):
    """Staging-Hub round-trip tests: push tokenizers (slow, fast, and custom
    dynamic classes) to temporary repos and reload them."""
    # Minimal WordPiece vocabulary used to build throwaway tokenizers.
    snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def snake_case__ ( cls ):
        '''Authenticate against the staging Hub once for the whole class.'''
        lowercase__ = TOKEN
        HfFolder.save_token(_UpperCAmelCase )
    @classmethod
    def snake_case__ ( cls ):
        '''Best-effort cleanup of the repos the tests may have created.'''
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer" )
        except HTTPError:
            pass
    def snake_case__ ( self ):
        '''Push to a user repo via push_to_hub and via save_pretrained(push_to_hub=True), then reload.'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase__ = os.path.join(_UpperCAmelCase, "vocab.txt" )
            with open(_UpperCAmelCase, "w", encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            lowercase__ = BertTokenizer(_UpperCAmelCase )
        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token )
        lowercase__ = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_UpperCAmelCase, repo_id="test-tokenizer", push_to_hub=_UpperCAmelCase, use_auth_token=self._token )
        lowercase__ = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
    def snake_case__ ( self ):
        '''Same round-trip as above, but into an organization-owned repo.'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase__ = os.path.join(_UpperCAmelCase, "vocab.txt" )
            with open(_UpperCAmelCase, "w", encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            lowercase__ = BertTokenizer(_UpperCAmelCase )
        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token )
        lowercase__ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                _UpperCAmelCase, repo_id="valid_org/test-tokenizer-org", push_to_hub=_UpperCAmelCase, use_auth_token=self._token )
        lowercase__ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
    @require_tokenizers
    def snake_case__ ( self ):
        '''Push custom dynamic tokenizer classes (slow then fast) and reload with trust_remote_code.'''
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase__ = os.path.join(_UpperCAmelCase, "vocab.txt" )
            with open(_UpperCAmelCase, "w", encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            lowercase__ = CustomTokenizer(_UpperCAmelCase )
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
        lowercase__ = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=_UpperCAmelCase )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase__ = os.path.join(_UpperCAmelCase, "vocab.txt" )
            with open(_UpperCAmelCase, "w", encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            lowercase__ = BertTokenizerFast.from_pretrained(_UpperCAmelCase )
            bert_tokenizer.save_pretrained(_UpperCAmelCase )
            lowercase__ = CustomTokenizerFast.from_pretrained(_UpperCAmelCase )
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
        lowercase__ = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=_UpperCAmelCase )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast" )
        lowercase__ = AutoTokenizer.from_pretrained(
            F'''{USER}/test-dynamic-tokenizer''', use_fast=_UpperCAmelCase, trust_remote_code=_UpperCAmelCase )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
class a__ ( unittest.TestCase ):
    """Unit tests for the tokenizers `Trie` helper (prefix tree used to split
    text on added tokens).

    NOTE(review): every test method shares the single name ``snake_case__``, so
    each definition shadows the previous one and only the last is attached to
    the class — consider distinct ``test_*`` names so unittest discovers them.
    """

    def snake_case__ ( self ):
        """Adding words builds the expected nested-dict structure."""
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def snake_case__ ( self ):
        """Split is a no-op on an empty trie; longest added token wins."""
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def snake_case__ ( self ):
        """Single-character tokens split at both ends of the text."""
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def snake_case__ ( self ):
        """A partial-suffix entry must not break matching of the full token."""
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def snake_case__ ( self ):
        """Single-letter entries inside the special token must not break it."""
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def snake_case__ ( self ):
        """Overlapping tokens: the earlier, longer match takes precedence."""
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def snake_case__ ( self ):
        """A longer token swallows shorter tokens contained inside it."""
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def snake_case__ ( self ):
        """cut_text splits at the given offsets (duplicates/zeros tolerated)."""
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 668 | """simple docstring"""
from __future__ import annotations
def __a ( A , A ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(A ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest

    doctest.testmod()
| 668 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __snake_case ( unittest.TestCase ):
    """Tests for the framework-agnostic tensor helpers in ``transformers.utils``
    (flatten_dict / transpose / reshape / squeeze / expand_dims) against numpy,
    torch, tf and jax inputs.

    NOTE(review): every test method is named ``a``, so each definition shadows
    the previous one and only the last method actually exists on the class —
    consider distinct ``test_*`` names so unittest discovers all of them.
    """

    def a ( self ) -> Dict:
        """flatten_dict turns nested dicts into dot-separated keys."""
        nested = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        flat = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(nested), flat)

    def a ( self ) -> Dict:
        """transpose matches numpy's transpose, with and without explicit axes."""
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def a ( self ) -> Optional[int]:
        """transpose on torch tensors agrees with the numpy result."""
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def a ( self ) -> Any:
        """transpose on tf tensors agrees with the numpy result."""
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def a ( self ) -> Tuple:
        """transpose on jax arrays agrees with the numpy result."""
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def a ( self ) -> Union[str, Any]:
        """reshape matches np.reshape."""
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def a ( self ) -> Any:
        """reshape on torch tensors agrees with the numpy result."""
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def a ( self ) -> Union[str, Any]:
        """reshape on tf tensors agrees with the numpy result."""
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def a ( self ) -> Dict:
        """reshape on jax arrays agrees with the numpy result."""
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def a ( self ) -> Any:
        """squeeze matches np.squeeze, with and without an explicit axis."""
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def a ( self ) -> Union[str, Any]:
        """squeeze on torch tensors agrees with the numpy result."""
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def a ( self ) -> Dict:
        """squeeze on tf tensors agrees with the numpy result."""
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def a ( self ) -> List[Any]:
        """squeeze on jax arrays agrees with the numpy result."""
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def a ( self ) -> Optional[int]:
        """expand_dims matches np.expand_dims."""
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def a ( self ) -> Tuple:
        """expand_dims on torch tensors agrees with the numpy result."""
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def a ( self ) -> Union[str, Any]:
        """expand_dims on tf tensors agrees with the numpy result."""
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def a ( self ) -> Optional[Any]:
        """expand_dims on jax arrays agrees with the numpy result."""
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 268 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
# Flag name must match the reference inside the export helper below.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Trace *model* with *model_args* and write the ONNX graph to *output_path*.

    :param model: torch module to export (its ``forward`` is traced)
    :param model_args: example inputs passed to the model during tracing
    :param output_path: destination ``.onnx`` file (parent dirs are created)
    :param ordered_input_names: names assigned to the graph inputs, in order
    :param output_names: names assigned to the graph outputs
    :param dynamic_axes: mapping of input name -> {axis index: symbolic name}
    :param opset: ONNX operator-set version to target
    :param use_external_data_format: store weights outside the proto (pre-1.11 only)
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Export the VAE decoder of a `diffusers` checkpoint to ONNX.

    :param model_path: local directory or Hub id of the diffusers checkpoint
    :param output_path: directory that will receive ``vae_decoder/model.onnx``
    :param opset: ONNX operator-set version
    :param fp16: export in float16 (requires a CUDA device)
    :raises ValueError: if ``fp16`` is requested without CUDA available
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the VAE-decoder export.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        type=str,
        required=True,
        help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
    )
    parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--opset',
        default=14,
        type=int,
        help='The version of the ONNX operator set to use.',
    )
    parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    print(args.output_path)
    # argparse stores `--fp16` under `args.fp16`.
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print('SD: Done: ONNX')
| 268 | 1 |
def solution(n: int = 1_0_0_0) -> int:
    """Return ``sum(2 * a * floor((a - 1) / 2))`` for ``a`` in ``3..n`` inclusive.

    :param n: upper bound of the summation range (default 1000)
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
    # Print the result for the default limit when run as a script.
    print(solution())
| 203 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_snake_case : List[str] = logging.get_logger(__name__)
class a (PoolFormerImageProcessor ):
    """Deprecated alias of :class:`PoolFormerImageProcessor`.

    Kept only for backward compatibility; emits a ``FutureWarning`` on
    construction and otherwise behaves exactly like the image processor.
    """

    def __init__( self , *args : Any , **kwargs : Union[str, Any] ) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 203 | 1 |
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase__ :
    """Harris corner detector over a grayscale image.

    ``k`` is the Harris sensitivity constant (only the conventional 0.04 / 0.06
    values are accepted) and ``window_size`` is the side length of the square
    summation window used for the structure tensor.
    """

    def __init__( self , k: float , window_size: int ) -> None:
        if k in (0.0_4, 0.0_6):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value" )

    def __str__( self ) -> str:
        return str(self.k )

    def snake_case__ ( self , img_path: str ):
        """Detect Harris corners in the image at *img_path*.

        Returns ``(color_img, corner_list)``: the image with detected corners
        painted red, and a list of ``[x, y, r]`` entries (r = corner response).
        """
        img = cva.imread(img_path , 0 )  # read as grayscale
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        # np.gradient returns the gradient along axis 0 (rows -> dy) first.
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # Use the k supplied at construction time instead of a hard-coded 0.04.
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    # Demo: run the detector on an image from disk and save the annotated copy.
    edge_detect = UpperCamelCase__(0.04, 3)
    color_img , _corners = edge_detect.snake_case__("path_to_image")
    cva.imwrite("detect.png", color_img)
| 104 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_lowercase = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_lowercase = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_lowercase = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    """`datasets` metric computing mean squared error via scikit-learn."""

    def _info(self):
        """Describe the metric: docs, citation and the input feature schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
                """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
            ] , )

    def _get_feature_types(self):
        """Feature schema: nested float sequences for the "multilist" config,
        scalar floats otherwise."""
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("""float""" ) ),
                "references": datasets.Sequence(datasets.Value("""float""" ) ),
            }
        else:
            return {
                "predictions": datasets.Value("""float""" ),
                "references": datasets.Value("""float""" ),
            }

    def _compute(self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        """Compute the MSE (or RMSE when ``squared=False``) of predictions
        against references; returns ``{"mse": value}``."""
        # sklearn's signature is (y_true, y_pred), so references come first.
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
| 5 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ : Optional[Any] =logging.get_logger(__name__)
class __a ( BaseImageProcessor ):
    """Image processor that optionally resizes images down to the nearest
    multiple of ``size_divisor`` and rescales pixel values to [0, 1]."""

    model_input_names = ["pixel_values"]

    def __init__( self , do_resize = True , size_divisor = 32 , resample=PILImageResampling.BILINEAR , do_rescale = True , **kwargs , ):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )

    def resize( self , image , size_divisor , resample , data_format = None , **kwargs ):
        """Resize *image* so both sides are multiples of *size_divisor*."""
        height , width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # `resize` here resolves to the module-level transform, not this method.
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image

    def rescale( self , image , scale , data_format = None , **kwargs ):
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , size_divisor = None , resample=None , do_rescale = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """Run the full pipeline (to-numpy -> resize -> rescale -> channel
        format) over one image or a list of images and return a BatchFeature."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('Invalid image(s)' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 2_55 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 702 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
A_ : List[str] =logging.get_logger(__name__)
A_ : Optional[int] ={
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __a ( PretrainedConfig ):
    """Configuration for Perceiver models; stores architecture hyper-parameters
    plus the task-specific attributes (MLM, image classification, optical flow,
    multimodal autoencoding) read by the model heads."""

    model_type = "perceiver"

    def __init__( self , num_latents=2_56 , d_latents=12_80 , d_model=7_68 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_query_residual=True , vocab_size=2_62 , max_position_embeddings=20_48 , image_size=56 , train_size=[3_68, 4_96] , num_frames=16 , audio_samples_per_frame=19_20 , samples_per_patch=16 , output_shape=[1, 16, 2_24, 2_24] , **kwargs , ):
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class __a ( OnnxConfig ):
    """ONNX export configuration for Perceiver: declares the graph inputs,
    validation tolerance and dummy-input generation for text or image
    preprocessors."""

    @property
    def inputs(self):
        """Input names and their dynamic axes (an extra `choice` axis for
        multiple-choice tasks)."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )

    @property
    def atol_for_validation(self):
        # Absolute tolerance when comparing exported vs. eager outputs.
        return 1e-4

    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , ):
        """Build dummy model inputs from a tokenizer or an image preprocessor,
        renaming the primary input to Perceiver's generic `inputs` key."""
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_text = [' '.join(['a'] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_text , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('input_ids' )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_images = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_images , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('pixel_values' )
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 222 | 0 |
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
# Module logger; the verification helpers below reference it as `logger`.
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """Which integrity checks to run when preparing a dataset."""

    ALL_CHECKS = '''all_checks'''
    BASIC_CHECKS = '''basic_checks'''
    NO_CHECKS = '''no_checks'''
class ChecksumVerificationException(Exception):
    """Base error for checksum verification failures."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    """Compare recorded download checksums against the expected ones.

    :param expected_checksums: mapping url -> expected checksum, or None to skip
    :param recorded_checksums: mapping url -> checksum actually recorded
    :param verification_name: optional label used in error/log messages
    :raises ExpectedMoreDownloadedFiles: expected urls missing from the record
    :raises UnexpectedDownloadedFile: recorded urls that were not expected
    :raises NonMatchingChecksumError: any url whose checksums differ
    """
    if expected_checksums is None:
        logger.info("""Unable to verify checksums.""")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = """ for """ + verification_name if verification_name is not None else """"""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            F"""Checksums didn't match{for_verification_name}:\n"""
            F"""{bad_urls}\n"""
            """Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""")
    logger.info("""All the checksums matched successfully""" + for_verification_name)
class SplitsVerificationException(Exception):
    """Base error for split-size verification failures."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits, recorded_splits):
    """Compare recorded split infos against the expected ones.

    :param expected_splits: mapping name -> split info, or None to skip
    :param recorded_splits: mapping name -> split info actually produced
    :raises ExpectedMoreSplits: expected split names missing from the record
    :raises UnexpectedSplits: recorded split names that were not expected
    :raises NonMatchingSplitsSizesError: any split whose num_examples differ
    """
    if expected_splits is None:
        logger.info("""Unable to verify splits sizes.""")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("""All the splits matched successfully.""")
def get_size_checksum_dict(path, record_checksum=True):
    """Return ``{"num_bytes": file size, "checksum": sha256 hex or None}`` for *path*.

    The file is hashed in 1 MiB chunks so large files don't need to fit in memory.
    """
    if record_checksum:
        # Local import: the module-level `from hashlib import shaaaa` is broken
        # (no such attribute); sha256 is what the checksum format expects.
        from hashlib import sha256

        m = sha256()
        with open(path, """rb""") as f:
            for chunk in iter(lambda: f.read(1 << 2_0), B""""""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    """Whether *dataset_size* (bytes) fits under ``config.IN_MEMORY_MAX_SIZE``.

    Returns False when either value is falsy (unknown size or no limit set).
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 389 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    """Builds tiny MaskFormerSwin configs/inputs and runs the shape checks used
    by the test classes below (which reference it as ``MaskFormerSwinModelTester``).

    Reconstructed: the scrambled original had duplicate parameter names (a
    SyntaxError) and discarded every attribute assignment into a throwaway local.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],  # list defaults are shared but never mutated (test-only code)
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels only when use_labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a MaskFormerSwinConfig mirroring this tester's hyper-parameters."""
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # fixed: was misspelled `path_norm`, so the flag never reached the config
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check last_hidden_state shape after the downsampling stages."""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Sequence length shrinks 4x per stage transition; width doubles per stage.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Check feature maps/channels and that an unknown out_feature raises."""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-tester (config, dict) form."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
    """Common-tester suite for MaskFormerSwin (model + backbone).

    NOTE(review): identifiers in this class were mangled by an automated rename.
    The ``snake_case_`` base classes are unresolved here (presumably the
    ModelTesterMixin / PipelineTesterMixin imported above -- confirm), the
    ``UpperCamelCase`` class attributes presumably map to ``all_model_classes``,
    ``pipeline_model_mapping`` and boolean common-test switches, and many locals
    are assigned to a throwaway ``_a`` so later references to ``model``,
    ``outputs`` etc. are currently unresolved. Verify against the original
    transformers test module before relying on this class.
    """

    UpperCamelCase : str = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    UpperCamelCase : Optional[Any] = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
    UpperCamelCase : str = False
    UpperCamelCase : List[str] = False
    UpperCamelCase : str = False
    UpperCamelCase : Tuple = False
    UpperCamelCase : Optional[int] = False

    # setUp: builds the shared model tester and a ConfigTester (embed_dim=37).
    def _lowercase ( self : Any ) -> int:
        _a : Optional[int] = MaskFormerSwinModelTester(self )
        _a : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase__ , embed_dim=37 )

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
            """ `nn.DataParallel`"""
        ) )
    def _lowercase ( self : int ) -> str:
        pass

    # Runs the standard ConfigTester battery.
    def _lowercase ( self : Dict ) -> Optional[int]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    # Intentionally a no-op: common config properties are not checked for this model.
    def _lowercase ( self : Tuple ) -> Union[str, Any]:
        return

    # Model forward-pass shape check via the model tester.
    def _lowercase ( self : List[Any] ) -> Optional[Any]:
        _a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    # Backbone feature-map check via the model tester.
    def _lowercase ( self : str ) -> Tuple:
        _a : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*UpperCAmelCase__ )

    @unittest.skip("""Swin does not use inputs_embeds""" )
    def _lowercase ( self : Dict ) -> List[str]:
        pass

    @unittest.skip("""Swin does not support feedforward chunking""" )
    def _lowercase ( self : Optional[Any] ) -> Dict:
        pass

    # Checks input embeddings are an nn.Module and output embeddings are None/Linear.
    def _lowercase ( self : int ) -> Union[str, Any]:
        _a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _a : Tuple = model_class(UpperCAmelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            _a : int = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )

    # Verifies the first positional forward() argument is `pixel_values`.
    def _lowercase ( self : Any ) -> Tuple:
        _a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _a : Any = model_class(UpperCAmelCase__ )
            _a : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _a : Dict = [*signature.parameters.keys()]
            _a : Optional[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )

    @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def _lowercase ( self : Optional[Any] ) -> int:
        pass

    @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def _lowercase ( self : Any ) -> List[Any]:
        pass

    # Helper: runs a model and checks hidden-state count and (num_patches, embed_dim)
    # shape of the first hidden state. NOTE(review): duplicate parameter names below
    # are a mangling artifact (SyntaxError as written).
    def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ) -> Union[str, Any]:
        _a : Optional[Any] = model_class(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        with torch.no_grad():
            _a : Tuple = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
        _a : Dict = outputs.hidden_states
        # One hidden state per stage plus the initial embedding output.
        _a : Optional[int] = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
        # Swin has a different seq_length
        _a : List[str] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        _a : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    # Runs the hidden-states check with output_hidden_states passed both as a
    # call argument and via the config.
    def _lowercase ( self : str ) -> Dict:
        _a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
        _a : Any = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            _a : Optional[Any] = True
            self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _a : str = True
            self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

    # Same as above but with an image size that is not a multiple of the patch
    # size, so the model must pad before patching.
    def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
        _a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _a : Tuple = 3
        _a : Optional[int] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        _a : Tuple = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # Round each spatial dimension up to the next patch boundary.
        _a : Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        _a : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            _a : Optional[int] = True
            self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _a : Union[str, Any] = True
            self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , (padded_height, padded_width) )

    @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def _lowercase ( self : Any ) -> Any:
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def _lowercase ( self : List[str] ) -> Optional[int]:
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def _lowercase ( self : Union[str, Any] ) -> Tuple:
        pass

    # Checks that tuple (return_dict=False) and dict outputs are element-wise
    # equal, with and without labels / output_hidden_states.
    def _lowercase ( self : Dict ) -> List[str]:
        _a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        # Zeroes NaNs so the allclose comparison below stays meaningful.
        def set_nan_tensor_to_zero(UpperCAmelCase__ : List[Any] ):
            _a : int = 0
            return t

        def check_equivalence(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any]={} ):
            with torch.no_grad():
                _a : Union[str, Any] = model(**UpperCAmelCase__ , return_dict=UpperCAmelCase__ , **UpperCAmelCase__ )
                _a : Any = model(**UpperCAmelCase__ , return_dict=UpperCAmelCase__ , **UpperCAmelCase__ ).to_tuple()

            # Walks both output structures in parallel and compares leaf tensors.
            def recursive_check(UpperCAmelCase__ : int , UpperCAmelCase__ : str ):
                if isinstance(UpperCAmelCase__ , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
                        recursive_check(UpperCAmelCase__ , UpperCAmelCase__ )
                elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(UpperCAmelCase__ , UpperCAmelCase__ )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(UpperCAmelCase__ ) , set_nan_tensor_to_zero(UpperCAmelCase__ ) , atol=1E-5 ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
                            f""" {torch.isnan(UpperCAmelCase__ ).any()} and `inf`: {torch.isinf(UpperCAmelCase__ )}. Dict has"""
                            f""" `nan`: {torch.isnan(UpperCAmelCase__ ).any()} and `inf`: {torch.isinf(UpperCAmelCase__ )}."""
                        ) , )

            recursive_check(UpperCAmelCase__ , UpperCAmelCase__ )

        for model_class in self.all_model_classes:
            _a : Any = model_class(UpperCAmelCase__ )
            model.to(UpperCAmelCase__ )
            model.eval()
            # without labels
            _a : Dict = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
            _a : Any = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
            check_equivalence(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
            # with labels
            _a : int = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
            _a : List[Any] = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
            check_equivalence(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
            # without labels, with hidden states
            _a : Any = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
            _a : Union[str, Any] = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
            check_equivalence(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , {"""output_hidden_states""": True} )
            # with labels and hidden states
            _a : int = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
            _a : Tuple = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
            check_equivalence(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , {"""output_hidden_states""": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """Backbone-API tests for MaskFormerSwinBackbone.

    Reconstructed: the scrambled original inherited an undefined name instead of
    the imported ``BackboneTesterMixin`` and discarded its locals into ``_a``.
    ``all_model_classes`` and ``config_class`` are the attributes the mixin reads.
    """

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        """Check feature maps, hidden states and attentions of the backbone."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 389 | 1 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
# Module-level logger; the scrambled original bound it to `UpperCamelCase`
# although `logger.warning(...)` is used further down this file.
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    """Token-classification task for CoNLL-style NER files (one token per line).

    Reconstructed: the scrambled original had duplicate parameter names (a
    SyntaxError) and referenced locals (`mode`, `path`, ...) that were never
    bound; base class and names restored from the imports above.
    """

    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode):
        """Read ``<mode>.txt`` under ``data_dir`` and return a list of InputExample."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                # A blank line or a -DOCSTART- marker ends the current sentence.
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            # Flush a trailing sentence not followed by a blank line.
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Copy the test file to ``writer``, appending one predicted label per token."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path):
        """Return labels from ``path`` (one per line) or the default CoNLL-2003 set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    """Chunking variant of the NER task (CoNLL-2000/2003 chunk column).

    Reconstructed: the scrambled original inherited an undefined base and its
    ``get_labels`` read an unbound ``path`` local instead of its parameter.
    """

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path):
        """Return labels from ``path`` (one per line) or the default chunk tag set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    """Part-of-speech task reading CoNLL-U files via ``conllu.parse_incr``.

    Reconstructed: the scrambled original had duplicate parameter names (a
    SyntaxError) and referenced locals (`mode`, `words`, ...) never bound.
    """

    def read_examples_from_file(self, data_dir, mode):
        """Read ``<mode>.txt`` (CoNLL-U) under ``data_dir`` into InputExamples."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Write each sentence as ``form (upos|prediction)`` tokens, one line per sentence."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path):
        """Return labels from ``path`` (one per line) or the universal POS tag set."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 125 |
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
lowerCAmelCase__ = n
lowerCAmelCase__ = [None] * self.n
lowerCAmelCase__ = 0 # index of the first element
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
def __len__( self : str ) -> int:
return self.size
def a ( self : Any ) -> bool:
return self.size == 0
def a ( self : Dict ) -> List[str]:
return False if self.is_empty() else self.array[self.front]
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict ) -> Dict:
if self.size >= self.n:
raise Exception("QUEUE IS FULL" )
lowerCAmelCase__ = data
lowerCAmelCase__ = (self.rear + 1) % self.n
self.size += 1
return self
def a ( self : int ) -> Tuple:
if self.size == 0:
raise Exception("UNDERFLOW" )
lowerCAmelCase__ = self.array[self.front]
lowerCAmelCase__ = None
lowerCAmelCase__ = (self.front + 1) % self.n
self.size -= 1
return temp
| 125 | 1 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
# Fix: all four constants were bound to the same name `a`, leaving VECTOR_1..3
# and INITIAL_VECTORS (used below and in the __main__ block) undefined.
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])  # (1/2, sqrt(3)/2): apex of the unit triangle
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply ``iteration_step`` ``steps`` times to the initial vector list.

    Reconstructed: the scrambled original had two parameters with the same name
    (a SyntaxError) and returned an unbound local; the name ``iterate`` is the
    one called from the ``__main__`` block below.
    """
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Perform one Koch-snowflake step: replace every segment by four segments.

    For each consecutive pair of points the middle third is replaced by the two
    sides of an equilateral bump (via a 60-degree ``rotate``). Reconstructed:
    the scrambled original discarded its locals, so `vectors`/`new_vectors`
    were unbound.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2-D vector counter-clockwise by ``angle_in_degrees``.

    Reconstructed: the scrambled original had two parameters with the same name
    (a SyntaxError); ``rotate`` is the name called from ``iteration_step``.
    """
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the polyline through ``vectors`` with equal axis scaling and show it.

    Fix: the scrambled original unpacked the coordinates into a discarded local
    and then plotted the raw vector list instead of the x/y coordinate tuples.
    """
    axes = plt.gca()
    axes.set_aspect("""equal""")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the result was bound to `a` while `plot` was called with the
    # undefined name `processed_vectors`.
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 109 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    """Builds tiny LayoutLM configs/inputs and runs per-head shape checks.

    Reconstructed: the scrambled original had duplicate parameter names (a
    SyntaxError) and discarded every assignment into a throwaway local; the
    name ``TFLayoutLMModelTester`` is referenced by the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, bbox, token_type_ids, input_mask, labels...)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Exercise the base model with and without masks and check output shapes."""
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the MLM head produces per-token vocab logits."""
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the sequence-classification head produces per-example logits."""
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head produces per-token logits."""
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the QA head produces start/end logits over the sequence."""
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-tester (config, dict) form."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-tester suite for the TF LayoutLM model family.

    Reconstructed: the scrambled original inherited undefined base names instead
    of the imported mixins, and its class attributes / method names no longer
    matched the common-tester contract (``all_model_classes`` etc.).
    """

    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    """Build a fixed two-example LayoutLM batch for the slow integration tests.

    Returns:
        ``(input_ids, attention_mask, bbox, token_type_ids, labels)`` TF tensors.

    Fixes mangled source: the five assignments all bound the throwaway name
    ``lowercase__`` and the function then returned five names that were never
    bound (NameError); it was also defined under ``lowercase__`` while the
    integration tests call ``prepare_layoutlm_batch_inputs()``.
    """
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    """Slow integration tests running the public LayoutLM checkpoint on a fixed batch.

    Reconstructed from mangled source: the original unpacked the five batch
    tensors into one repeated name and fed undefined ``snake_case__`` into the
    model; locals were recovered from the names the assertions reference.
    """

    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_pooled = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_pooled, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_loss_shape = (2,)
        self.assertEqual(loss.shape, expected_loss_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_logits_shape = (2, 2)
        self.assertEqual(logits.shape, expected_logits_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # test the shape of the start/end logits
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
# --- concatenation artifact (stray table-row residue, not valid Python) ---
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset example and record its character/token ratio.

    Uses the module-level ``tokenizer`` defined below in this script.

    Fixes mangled source: the function was defined as ``__A`` although
    ``ds.map`` below passes ``tokenize``; the locals were all bound to one
    throwaway name while the final line returned an unbound ``output``; and
    ``truncation`` was passed an undefined name.
    NOTE(review): ``truncation=False`` matches the upstream pretokenization
    script — confirm.
    """
    output = {}
    output["input_ids"] = tokenizer(example['content'], truncation=False)['input_ids']
    output["ratio_char_token"] = len(example['content']) / len(output["input_ids"])
    return output
# Parse CLI arguments, load the dataset, tokenize it in parallel and push the
# result to the Hub. Fixes mangled source: every statement bound the single
# name ``__A`` while later lines referenced ``parser``/``args``/``ds``/
# ``t_start``, which were never defined.
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
# Drop the raw-text/metadata columns once tokenized to keep the output small.
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        """repo_name""",
        """path""",
        """copies""",
        """size""",
        """content""",
        """license""",
        """hash""",
        """line_mean""",
        """line_max""",
        """alpha_frac""",
        """autogenerated""",
    ],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
# --- concatenation artifact (stray table-row residue, not valid Python) ---
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__A = logging.get_logger(__name__)
def __A (_SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
@dataclass
class _lowerCAmelCase :
    """Deprecated CLI arguments for the Hugging Face benchmarking utilities.

    NOTE(review): this class is heavily mangled. Every field is bound to the
    same name ``__magic_name__`` (only the last assignment survives), the
    boolean fields use an undefined default ``a`` (originally distinct
    True/False defaults per flag), the methods are all named ``snake_case``
    (shadowing each other), and properties reference attributes such as
    ``self.models``/``self.multi_process``/``self.is_tpu`` that no surviving
    field defines. Restore the original field/method names before use.
    """

    # Originally `models` — checkpoints to benchmark.
    __magic_name__ :List[str] = list_field(
        default=[] , metadata={
            """help""": (
                """Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
                """ of all available models"""
            )
        } , )
    # Originally `batch_sizes`.
    __magic_name__ :List[int] = list_field(
        default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
    # Originally `sequence_lengths`.
    __magic_name__ :List[int] = list_field(
        default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
    # NOTE(review): `default=a` below is undefined — each flag originally had
    # its own True/False default.
    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
    __magic_name__ :bool = field(default=a , metadata={"""help""": """Use FP16 to accelerate inference."""} )
    __magic_name__ :bool = field(default=a , metadata={"""help""": """Benchmark training of model"""} )
    __magic_name__ :bool = field(default=a , metadata={"""help""": """Verbose memory tracing"""} )
    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
    __magic_name__ :bool = field(
        default=a , metadata={
            """help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
        } , )
    __magic_name__ :bool = field(default=a , metadata={"""help""": """Trace memory line by line"""} )
    __magic_name__ :bool = field(default=a , metadata={"""help""": """Save result to a CSV file"""} )
    __magic_name__ :bool = field(default=a , metadata={"""help""": """Save all print statements in a log file"""} )
    __magic_name__ :bool = field(default=a , metadata={"""help""": """Whether to print environment information"""} )
    __magic_name__ :bool = field(
        default=a , metadata={
            """help""": (
                """Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
                """ multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
                """ for debugging / testing and on TPU."""
            )
        } , )
    # The CSV/log filename fields below embed a timestamp at class-definition time.
    __magic_name__ :str = field(
        default=f"""inference_time_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
    __magic_name__ :str = field(
        default=f"""inference_memory_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
    __magic_name__ :str = field(
        default=f"""train_time_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
    __magic_name__ :str = field(
        default=f"""train_memory_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
    __magic_name__ :str = field(
        default=f"""env_info_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving environment information."""} , )
    __magic_name__ :str = field(
        default=f"""log_{round(time() )}.csv""" , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
    __magic_name__ :int = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
    __magic_name__ :bool = field(
        default=a , metadata={
            """help""": (
                """Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
                """ model weights."""
            )
        } , )

    def snake_case ( self ):
        '''Emit the deprecation warning for these benchmarking utilities (originally ``__post_init__``).

        NOTE(review): ``__UpperCAmelCase`` passed as the warning category is
        undefined — originally ``FutureWarning``.
        '''
        warnings.warn(
            F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            ' are deprecated in general and it is advised to use external Benchmarking libraries '
            ' to benchmark Transformer models.' , __UpperCAmelCase , )

    def snake_case ( self ):
        '''Serialize the arguments to pretty-printed JSON (originally ``to_json_string``).'''
        return json.dumps(dataclasses.asdict(self ) , indent=2 )

    @property
    def snake_case ( self ):
        '''Return the configured model list, requiring at least one entry (originally ``model_names``).'''
        if len(self.models ) <= 0:
            raise ValueError(
                'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
                ' bert-base-cased` or `args.models = [\'bert-base-cased\'].' )
        return self.models

    @property
    def snake_case ( self ):
        '''Whether multiprocess measurement should be used (disabled on TPU) (originally ``do_multi_processing``).'''
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info('Multiprocessing is currently not possible on TPU.' )
            return False
        else:
            return True
# --- concatenation artifact (stray table-row residue, not valid Python) ---
# Boilerplate injected at the top of auto-generated doc notebooks: the pip
# install cell and placeholder substitutions used by the doc-builder.
SCREAMING_SNAKE_CASE : List[str] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# First notebook cell: the install snippet above.
SCREAMING_SNAKE_CASE : str = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder -> dummy-class substitutions applied when rendering doc snippets.
# NOTE(review): the first constant is referenced as ``INSTALL_CONTENT`` on the
# line above, but is bound to a mangled name — the originals were
# INSTALL_CONTENT / notebook_first_cells / black_avoid_patterns.
SCREAMING_SNAKE_CASE : Optional[int] = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
# --- concatenation artifact (stray table-row residue, not valid Python) ---
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the PoolFormer subpackage.
# Fixes mangled source: the structure dict and the optional submodule lists
# were bound to throwaway names while `_LazyModule` was called with an
# undefined `_import_structure`, and the lazy module was assigned to a local
# variable instead of replacing this module in `sys.modules`.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

# Image-processing classes are only importable when vision deps are present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

# Modeling classes are only importable when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below defers them until first attribute access.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
# --- concatenation artifact (stray table-row residue, not valid Python) ---
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (originally named `logger`).
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
# Map of checkpoint name -> hosted config URL (originally
# SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP).
lowerCAmelCase_ : Tuple = {
    """facebook/s2t-small-librispeech-asr""": (
        """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"""
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
    """Configuration class for Speech2Text encoder-decoder models.

    Fixes mangled source: the base class was the undefined name
    ``lowerCamelCase_`` (it must be ``PretrainedConfig``, imported above),
    every ``__init__`` parameter was named ``__a`` (duplicate parameter names
    are a SyntaxError), and every assignment bound a throwaway local ``_a``
    instead of setting an attribute on ``self``. Parameter names were
    recovered from the attribute references in the original body; their order
    and defaults are unchanged.
    """

    # Standard PretrainedConfig hooks (the three class attributes were all
    # bound to one mangled name in the source).
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        encoder_layers=12,
        encoder_ffn_dim=20_48,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=20_48,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=2_56,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=60_00,
        max_target_positions=10_24,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=10_24,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # NOTE(review): the source assigned encoder_layers a second time here;
        # upstream maps it to num_hidden_layers — confirm.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes ) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '
                f'`config.num_conv_layers = {self.num_conv_layers}`.' )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
# --- concatenation artifact (stray table-row residue, not valid Python) ---
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
# Minimal DreamBooth inference example: load a fine-tuned Stable Diffusion
# pipeline and generate one image. Fixes mangled source: every statement
# bound the single name `lowerCAmelCase_` while later lines referenced
# `model_id`/`pipe`/`prompt`/`image`, and `torch.floataa` is a mangled
# `torch.float16`.
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')

prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save('dog-bucket.png')
# --- concatenation artifact (stray table-row residue, not valid Python) ---
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert an OpenAI GPT TensorFlow checkpoint to a PyTorch weights + config dump.

    Args:
        openai_checkpoint_folder_path: Folder holding the TF checkpoint.
        openai_config_file: Optional JSON config path; empty string means defaults.
        pytorch_dump_folder_path: Output folder for weights and config.

    Fixes mangled source: the function was defined as ``_A`` although the
    ``__main__`` block calls ``convert_openai_checkpoint_to_pytorch``, and
    every local was bound to one throwaway name while later lines referenced
    ``model``/``config``/the dump paths.
    """
    # Construct model config (defaults when no config file is given).
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'''Save configuration file to {pytorch_config_dump_path}''')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point: parse the three required/optional paths and run the
    # conversion. Fixes mangled source: the parser and parsed args were
    # bound to `_snake_case` while later lines referenced `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--openai_checkpoint_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the TensorFlow checkpoint path.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--openai_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
# --- concatenation artifact (stray table-row residue, not valid Python) ---
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
# Distributed split_dataset_by_node smoke test.
# Fixes mangled source: the two constants, the exception class, the generator
# and main() were all defined under mangled names (`SCREAMING_SNAKE_CASE_`,
# `lowerCAmelCase_`, two clashing `__lowercase` defs) while the code below
# referenced NUM_SHARDS / NUM_ITEMS_PER_SHARD / FailedTestError / gen / main.
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    """Raised when a worker sees an unexpected number of local items."""
    pass


def gen(shards):
    """Yield NUM_ITEMS_PER_SHARD items per shard name, tagged with shard and index."""
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    """Check that split_dataset_by_node gives each rank its expected share."""
    rank = int(os.environ["""RANK"""])
    world_size = int(os.environ["""WORLD_SIZE"""])

    parser = ArgumentParser()
    # NOTE(review): argument types reconstructed from the upstream script
    # (--streaming as bool, ranks/workers as int) — confirm.
    parser.add_argument("""--streaming""", type=bool)
    parser.add_argument("""--local_rank""", type=int)
    parser.add_argument("""--num_workers""", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"""shards""": [F'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    # Each rank gets floor(full/world) items, plus one for the first
    # full % world ranks.
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(F'''local_size {local_size} != expected_local_size {expected_local_size}''')


if __name__ == "__main__":
    main()
# --- concatenation artifact (stray table-row residue, not valid Python) ---
def binomial_coefficient(n, k):
    """Compute C(n, k) iteratively with only integer arithmetic.

    Fixes mangled source: both parameters were named ``snake_case`` (a
    SyntaxError), and all four functions in this script were defined under
    the same name ``__lowercase`` while the call sites use the real names.
    """
    result = 1  # running value of C(n, k)
    # Since C(n, k) = C(n, n-k), use the smaller k for fewer iterations.
    if k > (n - k):
        k = n - k
    # Calculate C(n, k); dividing at each step keeps intermediates exact.
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count):
    """Return the node_count-th Catalan number: C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n):
    """Return n! for n >= 0; raise ValueError for negative n."""
    if n < 0:
        raise ValueError('''factorial() not defined for negative values''')
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count):
    """Return the number of distinct binary trees on node_count labeled nodes."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("""Enter the number of nodes: """).strip() or 0)
    if node_count <= 0:
        raise ValueError("""We need some nodes to work with.""")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
# --- concatenation artifact (stray table-row residue, not valid Python) ---
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowerCamelCase_ ( unittest.TestCase ):
    """Unit tests for the summarization preprocessing helpers.

    NOTE(review): this class is heavily mangled. Every method is named ``A``
    (each def shadows the previous, and unittest would only collect
    ``test_*`` names anyway), locals are bound to ``__magic_name__`` while
    the assertions reference the undefined ``__lowerCAmelCase``, and the
    first method (originally ``setUp``) never stores ``block_size`` on
    ``self``. Restore the original test-method names and locals before use.
    """

    def A ( self ):
        '''Originally setUp: should set self.block_size = 10.'''
        __magic_name__ :Dict = 1_0

    def A ( self ):
        '''truncate_or_pad pads a short sequence up to block_size with 0.'''
        __magic_name__ :Dict = [1, 2, 3, 4]
        __magic_name__ :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase )

    def A ( self ):
        '''truncate_or_pad leaves a sequence of exactly block_size unchanged.'''
        __magic_name__ :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        __magic_name__ :Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase )

    def A ( self ):
        '''truncate_or_pad truncates a sequence longer than block_size.'''
        __magic_name__ :Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
        __magic_name__ :str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase )

    def A ( self ):
        '''process_story yields no summary lines when there is no @highlight.'''
        __magic_name__ :Tuple = '''It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this.'''
        __magic_name__ , __magic_name__ :Optional[int] = process_story(__lowerCAmelCase )
        self.assertEqual(__lowerCAmelCase , [] )

    def A ( self ):
        '''process_story returns empty story and summary for empty input.'''
        __magic_name__ :Union[str, Any] = ''''''
        __magic_name__ , __magic_name__ :Optional[int] = process_story(__lowerCAmelCase )
        self.assertEqual(__lowerCAmelCase , [] )
        self.assertEqual(__lowerCAmelCase , [] )

    def A ( self ):
        '''process_story splits story sentences from @highlight summary lines.'''
        __magic_name__ :List[Any] = (
            '''It was the year of Our Lord one thousand seven hundred and '''
            '''seventy-five\n\nSpiritual revelations were conceded to England '''
            '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
        )
        __magic_name__ , __magic_name__ :str = process_story(__lowerCAmelCase )
        __magic_name__ :Optional[int] = [
            '''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
            '''Spiritual revelations were conceded to England at that favoured period, as at this.''',
        ]
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        __magic_name__ :Union[str, Any] = ['''It was the best of times.''']
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )

    def A ( self ):
        '''build_mask marks every position when no padding token is present.'''
        __magic_name__ :Any = torch.tensor([1, 2, 3, 4] )
        __magic_name__ :List[str] = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 0 ).numpy() , expected.numpy() )

    def A ( self ):
        '''build_mask zeroes positions equal to the given pad token (23).'''
        __magic_name__ :List[str] = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
        __magic_name__ :Union[str, Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 2_3 ).numpy() , expected.numpy() )

    def A ( self ):
        '''build_mask handles pad token 1 appearing only in the tail.'''
        __magic_name__ :Any = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        __magic_name__ :Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 1 ).numpy() , expected.numpy() )

    def A ( self ):
        '''compute_token_type_ids alternates segment ids at each separator (101).'''
        __magic_name__ :Dict = 1_0_1
        __magic_name__ :List[Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
        __magic_name__ :str = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        __magic_name__ :Tuple = compute_token_type_ids(__lowerCAmelCase , __lowerCAmelCase )
        np.testing.assert_array_equal(__lowerCAmelCase , __lowerCAmelCase )
# --- concatenation artifact (stray table-row residue, not valid Python) ---
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Module logger (originally `logger` — it is referenced as `logger` inside
# the tokenizer class below).
UpperCAmelCase__ :Dict = logging.get_logger(__name__)

# Names of the on-disk vocabulary files (originally VOCAB_FILES_NAMES,
# referenced under that name in save_vocabulary below).
UpperCAmelCase__ :Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}

# See all LED models at https://huggingface.co/models?filter=LED
# Checkpoint -> hosted file URLs (originally PRETRAINED_VOCAB_FILES_MAP).
UpperCAmelCase__ :Any = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}

# Max input sizes per checkpoint (originally
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
UpperCAmelCase__ :List[str] = {
    """allenai/led-base-16384""": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte value (0-255) to a printable unicode character.

    Printable/latin byte values map to themselves; the remaining bytes are
    remapped to code points starting at 256 so that no byte maps to a
    whitespace or control character the BPE would mishandle.

    Fixes mangled source: ``bs``/``cs``/``n`` were referenced but never bound
    (all assignments targeted one throwaway name), and the function was
    defined as ``__lowercase`` although the tokenizer calls
    ``bytes_to_unicode()``.
    """
    bs = (
        list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs, cs ) )
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbol strings).

    Fixes mangled source: ``pairs``/``prev_char`` were referenced but never
    bound, and the function was defined as ``__lowercase`` although the
    tokenizer's ``bpe`` method calls ``get_pairs``.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    # LED byte-level BPE tokenizer (BART/GPT-2 style).
    # NOTE(review): the base class ``lowerCAmelCase_`` is undefined in this
    # file (originally ``PreTrainedTokenizer``), and the four class
    # attributes below are all bound to the same name ``snake_case__`` so
    # only the last assignment survives (originals: vocab_files_names,
    # pretrained_vocab_files_map, max_model_input_sizes, model_input_names).
    snake_case__ : Optional[int] = VOCAB_FILES_NAMES
    snake_case__ : int = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ : str = ['input_ids', 'attention_mask']
    # NOTE(review): this constructor is mangled beyond running — every
    # parameter is named ``A__`` (duplicate parameter names are a
    # SyntaxError; originally vocab_file, merges_file, errors, bos/eos/sep/
    # cls/unk/pad/mask tokens, add_prefix_space), and every local is bound to
    # ``__lowerCamelCase`` while later lines reference self.encoder,
    # self.byte_encoder, bos_token, bpe_merges, etc. Restore the original
    # names before use.
    def __init__( self : Optional[int] , A__ : str , A__ : List[Any] , A__ : Dict="replace" , A__ : List[Any]="<s>" , A__ : Any="</s>" , A__ : Optional[Any]="</s>" , A__ : Optional[Any]="<s>" , A__ : str="<unk>" , A__ : List[Any]="<pad>" , A__ : Optional[Any]="<mask>" , A__ : List[Any]=False , **A__ : Union[str, Any] , ):
        """Build the LED BPE tokenizer from a vocab file and a merges file."""
        # Wrap each plain-string special token in AddedToken.
        __lowerCamelCase : Optional[int] = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else bos_token
        __lowerCamelCase : List[Any] = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else eos_token
        __lowerCamelCase : str = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else sep_token
        __lowerCamelCase : Optional[Any] = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else cls_token
        __lowerCamelCase : Tuple = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else unk_token
        __lowerCamelCase : List[str] = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        __lowerCamelCase : List[str] = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else mask_token
        super().__init__(
            errors=A__ , bos_token=A__ , eos_token=A__ , unk_token=A__ , sep_token=A__ , cls_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , **A__ , )
        # Load token -> id vocabulary and build the reverse (decoder) map.
        with open(A__ , encoding="""utf-8""" ) as vocab_handle:
            __lowerCamelCase : Union[str, Any] = json.load(A__ )
        __lowerCamelCase : List[str] = {v: k for k, v in self.encoder.items()}
        __lowerCamelCase : Optional[Any] = errors  # how to handle errors in decoding
        # Byte <-> unicode maps for byte-level BPE.
        __lowerCamelCase : Union[str, Any] = bytes_to_unicode()
        __lowerCamelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
        # Load BPE merge ranks (skip the "#version" header and trailing blank).
        with open(A__ , encoding="""utf-8""" ) as merges_handle:
            __lowerCamelCase : Tuple = merges_handle.read().split("""\n""" )[1:-1]
        __lowerCamelCase : Any = [tuple(merge.split() ) for merge in bpe_merges]
        __lowerCamelCase : int = dict(zip(A__ , range(len(A__ ) ) ) )
        __lowerCamelCase : List[Any] = {}
        __lowerCamelCase : Dict = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        __lowerCamelCase : int = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def a_ ( self : Union[str, Any] ):
        """Return the size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder )
def a_ ( self : Dict ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    # NOTE(review): this is the standard GPT-2 style BPE merge loop, but the
    # source is mangled: every local is bound to ``__lowerCamelCase`` while
    # later lines reference ``word``/``pairs``/``bigram``/``first``/
    # ``second``/``new_word``/``i``/``j`` (never bound), and ``self.cache``
    # is never written. Restore the original local names before use.
    def a_ ( self : Tuple , A__ : Union[str, Any] ):
        """Apply byte-pair merges to one pre-tokenized word, with caching."""
        if token in self.cache:
            return self.cache[token]
        __lowerCamelCase : Union[str, Any] = tuple(A__ )
        __lowerCamelCase : int = get_pairs(A__ )
        if not pairs:
            return token
        while True:
            # Pick the adjacent pair with the lowest merge rank.
            __lowerCamelCase : Any = min(A__ , key=lambda A__ : self.bpe_ranks.get(A__ , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __lowerCamelCase , __lowerCamelCase : int = bigram
            __lowerCamelCase : Optional[int] = []
            __lowerCamelCase : Dict = 0
            while i < len(A__ ):
                try:
                    __lowerCamelCase : str = word.index(A__ , A__ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __lowerCamelCase : List[Any] = j
                if word[i] == first and i < len(A__ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __lowerCamelCase : int = tuple(A__ )
            __lowerCamelCase : Tuple = new_word
            if len(A__ ) == 1:
                break
            else:
                __lowerCamelCase : str = get_pairs(A__ )
        __lowerCamelCase : Tuple = """ """.join(A__ )
        __lowerCamelCase : int = word
        return word
    # NOTE(review): mangled locals — ``bpe_tokens`` and the per-token byte
    # string are referenced but bound to ``__lowerCamelCase``; restore before use.
    def a_ ( self : Any , A__ : List[str] ):
        """Split text with the BPE regex, byte-encode each piece, and BPE it."""
        __lowerCamelCase : List[str] = []
        for token in re.findall(self.pat , A__ ):
            __lowerCamelCase : Any = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A__ ).split(""" """ ) )
        return bpe_tokens
def a_ ( self : List[Any] , A__ : str ):
"""simple docstring"""
return self.encoder.get(A__ , self.encoder.get(self.unk_token ) )
def a_ ( self : Optional[int] , A__ : Union[str, Any] ):
"""simple docstring"""
return self.decoder.get(A__ )
def a_ ( self : Dict , A__ : Any ):
"""simple docstring"""
__lowerCamelCase : Optional[int] = """""".join(A__ )
__lowerCamelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def a_ ( self : str , A__ : str , A__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(A__ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowerCamelCase : Any = os.path.join(
A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCamelCase : Optional[Any] = os.path.join(
A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(A__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A__ , ensure_ascii=A__ ) + """\n""" )
__lowerCamelCase : Optional[int] = 0
with open(A__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A__ : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
__lowerCamelCase : List[Any] = token_index
writer.write(""" """.join(A__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def a_ ( self : List[Any] , A__ : List[int] , A__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase : str = [self.cls_token_id]
__lowerCamelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self : Union[str, Any] , A__ : List[int] , A__ : Optional[List[int]] = None , A__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
if token_ids_a is None:
return [1] + ([0] * len(A__ )) + [1]
return [1] + ([0] * len(A__ )) + [1, 1] + ([0] * len(A__ )) + [1]
def a_ ( self : str , A__ : List[int] , A__ : Optional[List[int]] = None ):
"""simple docstring"""
__lowerCamelCase : Dict = [self.sep_token_id]
__lowerCamelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a_ ( self : Any , A__ : Optional[int] , A__ : Dict=False , **A__ : Dict ):
"""simple docstring"""
__lowerCamelCase : Optional[Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A__ ) > 0 and not text[0].isspace()):
__lowerCamelCase : Tuple = """ """ + text
return (text, kwargs)
def a_ ( self : List[str] , A__ : Union[Dict[str, EncodedInput], BatchEncoding] , A__ : Optional[int] = None , A__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , A__ : Optional[int] = None , A__ : Optional[bool] = None , ):
"""simple docstring"""
__lowerCamelCase : Any = super()._pad(
encoded_inputs=A__ , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , )
# Load from model defaults
if return_attention_mask is None:
__lowerCamelCase : Optional[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowerCamelCase : Dict = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__lowerCamelCase : str = len(encoded_inputs["""global_attention_mask"""] ) != len(A__ )
if needs_to_be_padded:
__lowerCamelCase : str = len(A__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowerCamelCase : Dict = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__lowerCamelCase : List[str] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 150 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def __lowercase (_lowercase ) -> Optional[Any]:
    """Wrap ``_lowercase`` so that, on accelerate >= 0.17.0, an attached
    accelerate hook's ``pre_forward`` runs before the method.

    Returns the method unchanged when accelerate is unavailable or too old.
    Fixes: the original returned the undefined name ``method`` and compared
    ``version.parse(<the method>)`` instead of the computed accelerate version.
    """
    if not is_accelerate_available():
        return _lowercase
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    # Hooks gained pre_forward support in accelerate 0.17.0.
    if version.parse(accelerate_version ) < version.parse("""0.17.0""" ):
        return _lowercase

    def wrapper(self, *args, **kwargs ):
        if hasattr(self, """_hf_hook""" ) and hasattr(self._hf_hook, """pre_forward""" ):
            self._hf_hook.pre_forward(self )
        return _lowercase(self, *args, **kwargs )

    return wrapper
| 150 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger plus the pretrained-checkpoint -> config-URL map.
# NOTE(review): both constants carry the same mangled name, so the dict below
# overwrites the logger created on the previous line — the upstream names are
# presumably `logger` and `LILT_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm.
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class UpperCamelCase ( PretrainedConfig ):
    """Configuration for LiLT models (RoBERTa-style encoder plus layout options).

    Fixes: the original inherited from an undefined mangled name (PretrainedConfig
    is imported at the top of this file), declared every __init__ parameter with
    the same mangled name (SyntaxError), and bound each hyper-parameter to a
    placeholder local instead of an attribute on ``self``.
    """

    model_type = """lilt"""

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_ad_position_embeddings=1024 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        # NOTE(review): upstream LiLT calls this `max_2d_position_embeddings`;
        # the mangled name is kept to match this chunk — confirm before shipping.
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 704 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class UpperCamelCase ( unittest.TestCase ):
    """Builds a tiny Bert config and random inputs for the Flax Bert tests.

    Fixes: __init__ bound every value to a placeholder local instead of ``self``
    attributes, its parameters all shared one mangled name (SyntaxError), and
    all three helper methods shared one name while the call sites below use
    ``prepare_config_and_inputs*``.
    """

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs( self ):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common( self ):
        """Return (config, inputs_dict) in the shape the common tester expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder( self ):
        """As above, but configured as a decoder with encoder states/mask for
        cross-attention."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class UpperCamelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    """Runs the shared Flax model-tester suite against the Bert model family.

    Fixes: the original inherited from an undefined mangled name
    (FlaxModelTesterMixin is imported at the top of this file), left its locals
    unbound, and mangled the method names unittest relies on (setUp / test_*).
    """

    test_head_masking = True
    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp( self ):
        # NOTE(review): no class named FlaxBertModelTester exists in this file —
        # the tester class above was renamed by the obfuscation; confirm.
        self.model_tester = FlaxBertModelTester(self )

    @slow
    def test_model_from_pretrained( self ):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("""bert-base-cased""" )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 441 | 0 |
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    """Value of f(x) = x^2 - a; its positive root is the square root of ``a``.

    Fixes: the original declared both parameters with the same name
    (SyntaxError), referenced undefined names in the body, and was defined
    under a mangled name while the iteration below calls ``fx``.
    """
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    """Derivative f'(x) = 2x of f(x) = x^2 - a.

    Fix: defined under a mangled name while the iteration below calls
    ``fx_derivative``.
    """
    return 2 * x
def get_initial_point(a: float) -> float:
    """Cheap starting guess >= sqrt(a): repeatedly square 2.0 until it exceeds ``a``.

    Fixes: the original bound the loop value to a mangled placeholder and was
    defined under a mangled name while the iteration below calls
    ``get_initial_point``.
    """
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def __UpperCAmelCase (a: float , max_iter: int = 9_9_9_9 , tolerance: float = 0.00000000000001 ) -> float:
    """Approximate sqrt(a) via Newton-Raphson on f(x) = x^2 - a.

    Iterates up to ``max_iter`` times, stopping once successive iterates differ
    by less than ``tolerance``. Raises ValueError for negative ``a``.

    Fixes: duplicate parameter names (SyntaxError) and locals bound to mangled
    placeholders so ``value``/``prev_value`` were never defined.
    """
    if a < 0:
        raise ValueError("""math domain error""" )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 541 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class A__ ( unittest.TestCase ):
    """Tests for TvltProcessor: save/load round-trip and delegation to the
    image processor and feature extractor.

    Fixes: every method was mangled to the same name ``a_`` (so unittest could
    discover nothing and later defs shadowed earlier ones), and locals were
    bound to placeholders instead of ``self`` attributes / named variables.
    Names below were reconstructed from the call sites.
    """

    def setUp( self ):
        self.checkpoint = """ZinengTang/tvlt-base"""
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor( self , **kwargs ):
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )

    def get_feature_extractor( self , **kwargs ):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def test_save_load_pretrained_default( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )

    def test_feature_extractor( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2_0_0_0] )
        audio_dict = feature_extractor(audio , return_tensors='''np''' )
        input_processor = processor(audio=audio , return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 2_2_4, 2_2_4] )
        image_dict = image_processor(images , return_tensors='''np''' )
        input_processor = processor(images=images , return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_processor( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2_0_0_0] )
        images = np.ones([3, 2_2_4, 2_2_4] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 550 | 0 |
'''simple docstring'''
def floyd(n):
    """Print the top half of the star diamond: row i (0-based) holds i+1
    stars, left-padded with n-i-1 spaces.

    Fixes: the body referenced the undefined name ``n`` alongside the mangled
    parameter, and the def name did not match the ``floyd(...)`` call site in
    ``pretty_print`` below.
    """
    for i in range(0 , n):
        for _ in range(0 , n - i - 1):  # printing spaces
            print(" " , end="")
        for _ in range(0 , i + 1):  # printing stars
            print("* " , end="")
        print()
def reverse_floyd(n):
    """Print the bottom half of the star diamond: i stars on each row as i
    counts down from n, followed by the next row's left padding.

    Fixes: the inner star loop counted from ``n`` instead of ``i`` (every row
    printed n stars), and the def name did not match the ``reverse_floyd(...)``
    call site in ``pretty_print`` below.
    """
    for i in range(n , 0 , -1):
        for _ in range(i , 0 , -1):  # printing stars
            print("* " , end="")
        print()
        for _ in range(n - i + 1 , 0 , -1):  # printing spaces
            print(" " , end="")
def pretty_print(n):
    """Print the full diamond (upper + lower halves), or a placeholder message
    for non-positive input.

    Fixes: the body referenced the undefined name ``n`` and the def name did
    not match the ``pretty_print(...)`` call site in the main block below.
    """
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
    # Interactive demo loop: keep drawing diamonds until the user enters 0.
    # Fixes: the loop flag and the user's number were bound to a mangled
    # placeholder while the code read the undefined names K / user_number.
    print(R"""| /\ | |- | |- |--| |\ /| |-""")
    print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
    print("""Good Bye...""")
| 719 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
# NOTE(review): every constant below was renamed to `__A` by the obfuscation, so
# each assignment overwrites the previous one, while the functions further down
# read TRANSFORMERS_PATH / transformers_module / _re_tf_models / _re_flax_models /
# _re_pt_models / PIPELINE_TAGS_AND_AUTO_MODELS — none of which are ever bound
# here. Restore the original names before running this script.
__A : List[Any] = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__A : Dict = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__A : List[Any] = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
__A : Union[str, Any] = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__A : int = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
# NOTE(review): "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES" below doubles "FOR";
# the transformers constant is MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES — confirm.
__A : List[Any] = [
    ("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
    ("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
    ("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
    ("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
    ("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
    ("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
    ("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
    ("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
    ("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
    (
        """zero-shot-object-detection""",
        """MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
        """AutoModelForZeroShotObjectDetection""",
    ),
    ("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
    ("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
    ("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
    ("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
    (
        """table-question-answering""",
        """MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
        """AutoModelForTableQuestionAnswering""",
    ),
    ("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
    ("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
    (
        """next-sentence-prediction""",
        """MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
        """AutoModelForNextSentencePrediction""",
    ),
    (
        """audio-frame-classification""",
        """MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
        """AutoModelForAudioFrameClassification""",
    ),
    ("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
    (
        """document-question-answering""",
        """MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
        """AutoModelForDocumentQuestionAnswering""",
    ),
    (
        """visual-question-answering""",
        """MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
        """AutoModelForVisualQuestionAnswering""",
    ),
    ("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
    (
        """zero-shot-image-classification""",
        """MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
        """AutoModelForZeroShotImageClassification""",
    ),
    ("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
    ("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
    ("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def camel_case_split(identifier):
    """Split a CamelCase identifier into its component words.

    Fixes: the match iterator was bound to a placeholder so ``matches`` was
    undefined, and the def name did not match the ``camel_case_split`` call
    site in get_frameworks_table.
    """
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    """Build a DataFrame listing, for every model type, which frameworks
    (PyTorch / TensorFlow / Flax) implement it and its preferred processor.

    Fixes: every local was bound to a mangled placeholder, so the names the
    function actually reads (config_maping_names, lookup_dict, all_models, ...)
    were never defined.
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config" , ""): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update ``table`` (model class -> (pipeline tag, auto class)) from the
    PT/TF/Flax auto-model mappings and return it.

    Fixes: every local was bound to a mangled placeholder, so the names the
    loop reads (modules, model_names, ...) were never defined.
    """
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}''']
        auto_classes = [auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}''']
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping).values():
                if isinstance(name , str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token , commit_sha):
    """Regenerate the frameworks and pipeline-tags tables and push them to the
    huggingface/transformers-metadata dataset repo.

    Fixes: every local was bound to a mangled placeholder, so the names this
    function reads (frameworks_dataset, table, tmp_dir, commit_message, ...)
    were never defined.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=token)
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        })
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir , "pipeline_tags.json"))
        if commit_sha is not None:
            commit_message = (
                F'''Update with commit {commit_sha}\n\nSee: '''
                F'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
            )
        else:
            commit_message = "Update"
        upload_folder(
            repo_id="huggingface/transformers-metadata" , folder_path=tmp_dir , repo_type="dataset" , token=token , commit_message=commit_message , )
def check_pipeline_tags():
    """Raise ValueError if any pipeline task supported by transformers is
    missing from PIPELINE_TAGS_AND_AUTO_MODELS.

    Fixes: locals were bound to mangled placeholders, so the names this
    function reads (in_table, pipeline_tasks, model, missing, msg) were never
    defined; the def name also did not match the call in the main block below.
    """
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model , (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            F'''`utils/update_metadata.py`: {msg}. Please add them!''')
if __name__ == "__main__":
    # CLI entry point: either verify pipeline-tag coverage or push fresh metadata.
    # Fixes: the parser and parsed args were bound to a mangled placeholder
    # while the code read the undefined names `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
    parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
    parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
    args = parser.parse_args()
    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 187 | 0 |
import os
import sys
# Make the in-repo `src` tree importable before pulling in transformers.
# Fix: the path was bound to a mangled placeholder while the append read the
# undefined name SRC_DIR.
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
# Packages the hub entry points below depend on (torch.hub `dependencies` convention).
# NOTE(review): bound to the mangled name `A`; torch.hub looks for a module-level
# list literally named `dependencies` — confirm the original name before publishing.
A : Dict = [
    'torch',
    'numpy',
    'tokenizers',
    'filelock',
    'requests',
    'tqdm',
    'regex',
    'sentencepiece',
    'sacremoses',
    'importlib_metadata',
    'huggingface_hub',
]
# NOTE(review): all seven wrappers share the mangled name `UpperCamelCase`, so each
# definition shadows the previous one; upstream hubconf names them
# config/tokenizer/model/modelForCausalLM/... — names kept as-is here.
# Fix applied to every wrapper: the original declared *args and **kwargs with the
# same name (`*__magic_name__, **__magic_name__`), which is a SyntaxError.
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCamelCase ( *args , **kwargs ):
    """Thin wrapper around AutoConfig.from_pretrained."""
    return AutoConfig.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCamelCase ( *args , **kwargs ):
    """Thin wrapper around AutoTokenizer.from_pretrained."""
    return AutoTokenizer.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModel.__doc__ )
def UpperCamelCase ( *args , **kwargs ):
    """Thin wrapper around AutoModel.from_pretrained."""
    return AutoModel.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCamelCase ( *args , **kwargs ):
    """Thin wrapper around AutoModelForCausalLM.from_pretrained."""
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCamelCase ( *args , **kwargs ):
    """Thin wrapper around AutoModelForMaskedLM.from_pretrained."""
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCamelCase ( *args , **kwargs ):
    """Thin wrapper around AutoModelForSequenceClassification.from_pretrained."""
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCamelCase ( *args , **kwargs ):
    """Thin wrapper around AutoModelForQuestionAnswering.from_pretrained."""
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
| 15 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Pretrained-checkpoint -> config-URL map, then the module logger.
# NOTE(review): both constants carry the same mangled name, so the logger
# assignment below overwrites the archive map; the class further down also
# reads the name `logger`, which is never bound here — restore the original
# names (`MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`, `logger`).
__SCREAMING_SNAKE_CASE ={
    """facebook/mask2former-swin-small-coco-instance""": (
        """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class __magic_name__ ( PretrainedConfig ):
    """Configuration for a Mask2Former model: backbone config plus the
    transformer-decoder and loss hyper-parameters (defaults follow
    mask2former-swin-small-coco-instance).

    Fixes: the original inherited from an undefined mangled name
    (PretrainedConfig is imported at the top of this file), declared every
    __init__ parameter with the same mangled name (SyntaxError), bound every
    hyper-parameter to a placeholder local instead of ``self``, never defined
    ``backbones_supported`` (read in __init__), and gave both methods the same
    name ``_A`` so the classmethod was shadowed.
    """

    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__( self , backbone_config: Optional[Dict] = None , feature_size: int = 2_56 , mask_feature_size: int = 2_56 , hidden_dim: int = 2_56 , encoder_feedforward_dim: int = 10_24 , activation_function: str = "relu" , encoder_layers: int = 6 , decoder_layers: int = 10 , num_attention_heads: int = 8 , dropout: float = 0.0 , dim_feedforward: int = 20_48 , pre_norm: bool = False , enforce_input_projection: bool = False , common_stride: int = 4 , ignore_value: int = 2_55 , num_queries: int = 1_00 , no_object_weight: float = 0.1 , class_weight: float = 2.0 , mask_weight: float = 5.0 , dice_weight: float = 5.0 , train_num_points: int = 1_25_44 , oversample_ratio: float = 3.0 , importance_sample_ratio: float = 0.75 , init_std: float = 0.02 , init_xavier_std: float = 1.0 , use_auxiliary_loss: bool = True , feature_strides: List[int] = [4, 8, 16, 32] , output_auxiliary_logits: bool = None , **kwargs , ):
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
            backbone_config = CONFIG_MAPPING['''swin'''](
                image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported )}" )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )

    @classmethod
    def from_backbone_config( cls , backbone_config: PretrainedConfig , **kwargs ):
        """Instantiate a Mask2Former config from a pre-built backbone config."""
        return cls(
            backbone_config=backbone_config , **kwargs , )

    def to_dict( self ):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 234 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _A ( enum.Enum ):
lowercase_ : Any = 0
lowercase_ : Union[str, Any] = 1
lowercase_ : Any = 2
@add_end_docstrings(UpperCAmelCase_ )
class _A ( UpperCAmelCase_ ):
    """Text-generation pipeline: tokenize a prompt, run ``model.generate``,
    and decode the generated ids back into text.

    NOTE(review): throughout this class, locals are assigned to the
    placeholder name ``__UpperCamelCase`` but later read under other names
    (``prefix``, ``preprocess_params``, ``prefix_inputs``,
    ``stop_sequence_ids``, ``inputs``, ``records``, ...). This looks like
    mechanical renaming damage; confirm against the upstream
    ``TextGenerationPipeline`` before relying on runtime behavior. Several
    methods also reuse the name ``a``, so only the last one survives on the
    class.
    """

    # Article-style prefix prepended for XLNet / Transfo-XL models, which
    # need extra leading context ("state") before the actual prompt.
    lowercase_ : List[str] = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''

    def __init__( self : List[str] , *lowerCamelCase__ : int , **lowerCamelCase__ : List[str] ):
        """Initialize the pipeline and pre-compute default prefix parameters."""
        super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
        # Restrict to causal-LM model classes for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            __UpperCamelCase : str = None
            if self.model.config.prefix is not None:
                __UpperCamelCase : int = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                __UpperCamelCase : int = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Tuple = self._sanitize_parameters(prefix=lowerCamelCase__ , **self._forward_params )
                __UpperCamelCase : str = {**self._preprocess_params, **preprocess_params}
                __UpperCamelCase : str = {**self._forward_params, **forward_params}

    def a ( self : Tuple , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int=None , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Optional[int]=None , **lowerCamelCase__ : str , ):
        """Split caller kwargs into (preprocess, forward/generate, postprocess)
        parameter dicts.

        Validates the mutually exclusive return options (``return_full_text``,
        ``return_text``, ``return_tensors``) and resolves the stop sequence.
        NOTE(review): the signature reuses ``lowerCamelCase__`` for every
        parameter, which is a SyntaxError — renaming damage.
        """
        __UpperCamelCase : Optional[Any] = {}
        if prefix is not None:
            __UpperCamelCase : Optional[int] = prefix
        if prefix:
            # Tokenize the prefix once so its length can be accounted for in
            # generation-length bookkeeping (see `prefix_length` in _forward).
            __UpperCamelCase : Optional[int] = self.tokenizer(
                lowerCamelCase__ , padding=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=self.framework )
            __UpperCamelCase : Union[str, Any] = prefix_inputs["""input_ids"""].shape[-1]
        if handle_long_generation is not None:
            # Only the "hole" strategy (left-truncate the prompt) is supported.
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'
                    """ [None, 'hole']""" )
            __UpperCamelCase : Tuple = handle_long_generation
        preprocess_params.update(lowerCamelCase__ )
        __UpperCamelCase : Optional[Any] = generate_kwargs
        __UpperCamelCase : int = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" )
            if return_tensors is not None:
                raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" )
            __UpperCamelCase : List[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" )
            __UpperCamelCase : Dict = ReturnType.TENSORS
        if return_type is not None:
            __UpperCamelCase : List[str] = return_type
        if clean_up_tokenization_spaces is not None:
            __UpperCamelCase : Optional[int] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            # Only single-token stop sequences are supported; warn and take
            # the first token otherwise.
            __UpperCamelCase : Dict = self.tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
            if len(lowerCamelCase__ ) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""" )
            __UpperCamelCase : List[Any] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def a ( self : List[str] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ):
        """Tokenize inputs; Transfo-XL additionally needs a space inserted
        before punctuation symbols."""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"""add_space_before_punct_symbol""": True} )
        return super()._parse_and_tokenize(*lowerCamelCase__ , **lowerCamelCase__ )

    def __call__( self : Any , lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Optional[int] ):
        """Generate text continuation(s) for the given prompt(s)."""
        return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )

    def a ( self : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]="" , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : List[str] ):
        """Tokenize ``prefix + prompt``; with the "hole" strategy, left-truncate
        the prompt so it plus the requested new tokens fits the model window."""
        __UpperCamelCase : str = self.tokenizer(
            prefix + prompt_text , padding=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=self.framework )
        __UpperCamelCase : List[Any] = prompt_text
        if handle_long_generation == "hole":
            __UpperCamelCase : int = inputs["""input_ids"""].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                __UpperCamelCase : List[str] = generate_kwargs["""max_new_tokens"""]
            else:
                # Derive the new-token budget from max_length minus the prompt.
                __UpperCamelCase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError("""We cannot infer how many new tokens are expected""" )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                __UpperCamelCase : Dict = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        """We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
                        """ models max length""" )
                # Keep only the rightmost tokens that still fit the window.
                __UpperCamelCase : List[str] = inputs["""input_ids"""][:, -keep_length:]
                if "attention_mask" in inputs:
                    __UpperCamelCase : Optional[Any] = inputs["""attention_mask"""][:, -keep_length:]
        return inputs

    def a ( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Dict ):
        """Run ``model.generate`` and reshape the output to
        (batch, num_return_sequences, sequence_length)."""
        __UpperCamelCase : str = model_inputs["""input_ids"""]
        __UpperCamelCase : Union[str, Any] = model_inputs.get("""attention_mask""" , lowerCamelCase__ )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            __UpperCamelCase : Any = None
            __UpperCamelCase : str = None
            __UpperCamelCase : Tuple = 1
        else:
            __UpperCamelCase : Optional[int] = input_ids.shape[0]
        __UpperCamelCase : Optional[Any] = model_inputs.pop("""prompt_text""" )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        __UpperCamelCase : Dict = generate_kwargs.pop("""prefix_length""" , 0 )
        if prefix_length > 0:
            __UpperCamelCase : int = """max_new_tokens""" in generate_kwargs or (
                """generation_config""" in generate_kwargs
                and generate_kwargs["""generation_config"""].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                # Extend max_length so the prefix does not eat into the budget.
                __UpperCamelCase : Dict = generate_kwargs.get("""max_length""" ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            __UpperCamelCase : Optional[Any] = """min_new_tokens""" in generate_kwargs or (
                """generation_config""" in generate_kwargs
                and generate_kwargs["""generation_config"""].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        __UpperCamelCase : Union[str, Any] = self.model.generate(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , **lowerCamelCase__ )
        __UpperCamelCase : Dict = generated_sequence.shape[0]
        if self.framework == "pt":
            __UpperCamelCase : str = generated_sequence.reshape(lowerCamelCase__ , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            __UpperCamelCase : List[str] = tf.reshape(lowerCamelCase__ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def a ( self : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any]=ReturnType.FULL_TEXT , lowerCamelCase__ : Optional[int]=True ):
        """Decode generated ids; strip the prompt for NEW_TEXT, prepend it for
        FULL_TEXT, or return raw ids for TENSORS."""
        __UpperCamelCase : str = model_outputs["""generated_sequence"""][0]
        __UpperCamelCase : Any = model_outputs["""input_ids"""]
        __UpperCamelCase : Optional[Any] = model_outputs["""prompt_text"""]
        __UpperCamelCase : int = generated_sequence.numpy().tolist()
        __UpperCamelCase : str = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                __UpperCamelCase : Dict = {"""generated_token_ids""": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                __UpperCamelCase : str = self.tokenizer.decode(
                    lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    __UpperCamelCase : Tuple = 0
                else:
                    # Length of the decoded prompt, used to slice it off below.
                    __UpperCamelCase : Union[str, Any] = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ , ) )
                if return_type == ReturnType.FULL_TEXT:
                    __UpperCamelCase : Optional[int] = prompt_text + text[prompt_length:]
                else:
                    __UpperCamelCase : Union[str, Any] = text[prompt_length:]
                __UpperCamelCase : Dict = {"""generated_text""": all_text}
            records.append(lowerCamelCase__ )
        return records
| 515 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
UpperCamelCase = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(UpperCAmelCase_ )
class _A ( UpperCAmelCase_ ):
    """Composite configuration for RAG: wraps a question-encoder sub-config and
    a generator sub-config plus retrieval hyper-parameters.

    NOTE(review): locals below are assigned to the placeholder name
    ``__UpperCamelCase`` but later read under other names
    (``question_encoder_config``, ``decoder_config``, ``output``); this looks
    like mechanical renaming damage — confirm against the upstream
    ``RagConfig``. The classmethod also reuses one parameter name twice,
    which is a SyntaxError.
    """

    lowercase_ : Tuple = '''rag'''  # model_type identifier
    lowercase_ : Tuple = True  # marks this as a composition of sub-configs

    def __init__( self : str , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Optional[Any]=" / " , lowerCamelCase__ : Optional[int]=" // " , lowerCamelCase__ : int=5 , lowerCamelCase__ : int=3_00 , lowerCamelCase__ : Optional[int]=7_68 , lowerCamelCase__ : str=8 , lowerCamelCase__ : Any="wiki_dpr" , lowerCamelCase__ : Optional[Any]="train" , lowerCamelCase__ : Optional[int]="compressed" , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : str=False , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Optional[int]=None , **lowerCamelCase__ : Dict , ):
        """Build the config; ``question_encoder`` and ``generator`` config
        dicts are required in ``kwargs`` and are rebuilt via ``AutoConfig``."""
        super().__init__(
            bos_token_id=lowerCamelCase__ , pad_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , forced_eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , prefix=lowerCamelCase__ , vocab_size=lowerCamelCase__ , **lowerCamelCase__ , )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        # Pop the two sub-config dicts and reconstruct typed configs from them.
        __UpperCamelCase : Any = kwargs.pop("""question_encoder""" )
        __UpperCamelCase : List[Any] = question_encoder_config.pop("""model_type""" )
        __UpperCamelCase : Union[str, Any] = kwargs.pop("""generator""" )
        __UpperCamelCase : List[Any] = decoder_config.pop("""model_type""" )
        # Imported lazily to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig
        __UpperCamelCase : Tuple = AutoConfig.for_model(lowerCamelCase__ , **lowerCamelCase__ )
        __UpperCamelCase : List[str] = AutoConfig.for_model(lowerCamelCase__ , **lowerCamelCase__ )
        # Loss options.
        __UpperCamelCase : List[str] = reduce_loss
        __UpperCamelCase : List[str] = label_smoothing
        __UpperCamelCase : Union[str, Any] = exclude_bos_score
        __UpperCamelCase : Tuple = do_marginalize
        # Retrieval / document formatting options.
        __UpperCamelCase : int = title_sep
        __UpperCamelCase : Any = doc_sep
        __UpperCamelCase : str = n_docs
        __UpperCamelCase : Optional[int] = max_combined_length
        __UpperCamelCase : Any = dataset
        __UpperCamelCase : Tuple = dataset_split
        __UpperCamelCase : List[Any] = index_name
        __UpperCamelCase : List[str] = retrieval_vector_size
        __UpperCamelCase : str = retrieval_batch_size
        __UpperCamelCase : Optional[Any] = passages_path
        __UpperCamelCase : Tuple = index_path
        __UpperCamelCase : Dict = use_dummy_dataset
        # Output / generation options.
        __UpperCamelCase : List[Any] = output_retrieved
        __UpperCamelCase : Optional[int] = do_deduplication
        __UpperCamelCase : int = use_cache
        if self.forced_eos_token_id is None:
            # Fall back to the generator's forced EOS token if not set here.
            __UpperCamelCase : Union[str, Any] = getattr(self.generator , """forced_eos_token_id""" , lowerCamelCase__ )

    @classmethod
    def a ( cls : Any , lowerCamelCase__ : PretrainedConfig , lowerCamelCase__ : PretrainedConfig , **lowerCamelCase__ : List[str] ):
        """Build a RAG config from separate question-encoder and generator configs."""
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowerCamelCase__ )

    def a ( self : Optional[int] ):
        """Serialize to a dict, expanding the two nested sub-configs and
        including the class-level ``model_type``."""
        __UpperCamelCase : int = copy.deepcopy(self.__dict__ )
        __UpperCamelCase : List[str] = self.question_encoder.to_dict()
        __UpperCamelCase : int = self.generator.to_dict()
        __UpperCamelCase : Dict = self.__class__.model_type
        return output
| 515 | 1 |
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    """Sort ``arr`` in place with odd-even transposition (brick) sort.

    Alternates passes over even-indexed neighbor pairs (0,1), (2,3), ... and
    odd-indexed pairs (1,2), (3,4), ...; after ``len(arr)`` passes the list
    is fully sorted. O(n^2) comparisons.

    The original defined this function under the placeholder name
    ``SCREAMING_SNAKE_CASE__`` while the ``__main__`` block called the
    undefined name ``odd_even_transposition`` (NameError); the function is
    restored under its real name with a backward-compatible alias.

    Args:
        arr: list of mutually comparable items; mutated in place.

    Returns:
        The same list object, sorted in ascending order.
    """
    size = len(arr)
    for pass_num in range(size):
        # Even passes start at index 0, odd passes at index 1.
        for i in range(pass_num % 2, size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


# Backward-compatible alias for the original placeholder name.
SCREAMING_SNAKE_CASE__ = odd_even_transposition


if __name__ == "__main__":
    demo = list(range(10, 0, -1))
    print(f"Original: {list(demo)}. Sorted: {odd_even_transposition(demo)}")
| 438 |
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
_lowerCAmelCase : List[str] = {
"""allenai/led-base-16384""": 16_384,
}
class lowerCAmelCase__ ( __magic_name__ ):
    """Fast (Rust-backed) LED tokenizer, derived from the BART BPE tokenizer.

    NOTE(review): several locals below are assigned to the placeholder name
    ``UpperCAmelCase__`` but later read under other names (``pre_tok_state``,
    ``state``, ``value``, ``is_split_into_words``, ``output``, ...); this
    looks like mechanical renaming damage — confirm against the upstream
    ``LEDTokenizerFast``. The methods also reuse the name ``__a``, so only
    the last definition survives on the class.
    """

    SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES  # expected vocab/merges/tokenizer file names
    SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE_ =LEDTokenizer  # slow-tokenizer counterpart
    SCREAMING_SNAKE_CASE_ =['''input_ids''', '''attention_mask''']

    def __init__( self : Optional[Any] , snake_case__ : str=None , snake_case__ : List[Any]=None , snake_case__ : Dict=None , snake_case__ : List[str]="replace" , snake_case__ : Optional[int]="<s>" , snake_case__ : List[str]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : Any="<pad>" , snake_case__ : Dict="<mask>" , snake_case__ : int=False , snake_case__ : Optional[int]=True , **snake_case__ : List[Any] , ):
        """Initialize the tokenizer and sync ``add_prefix_space`` /
        ``trim_offsets`` into the backend pre-tokenizer and post-processor."""
        super().__init__(
            snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        UpperCAmelCase__ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
            UpperCAmelCase__ : Dict = getattr(snake_case__ , pre_tok_state.pop("type" ) )
            UpperCAmelCase__ : str = add_prefix_space
            UpperCAmelCase__ : Any = pre_tok_class(**snake_case__ )
        UpperCAmelCase__ : Dict = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        UpperCAmelCase__ : List[str] = "post_processor"
        UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
        if tokenizer_component_instance:
            UpperCAmelCase__ : int = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                UpperCAmelCase__ : Optional[Any] = tuple(state["sep"] )
            if "cls" in state:
                UpperCAmelCase__ : Any = tuple(state["cls"] )
            UpperCAmelCase__ : Any = False
            if state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
                UpperCAmelCase__ : Union[str, Any] = add_prefix_space
                UpperCAmelCase__ : List[Any] = True
            if state.get("trim_offsets" , snake_case__ ) != trim_offsets:
                UpperCAmelCase__ : Optional[int] = trim_offsets
                UpperCAmelCase__ : List[Any] = True
            if changes_to_apply:
                # Recreate the post-processor with the corrected settings.
                UpperCAmelCase__ : List[str] = getattr(snake_case__ , state.pop("type" ) )
                UpperCAmelCase__ : Optional[int] = component_class(**snake_case__ )
                setattr(self.backend_tokenizer , snake_case__ , snake_case__ )

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def __a ( self : Any ):
        """Return the mask token string, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def __a ( self : Any , snake_case__ : Dict ):
        """Set the mask token; plain strings become a left-stripping AddedToken
        so the mask absorbs the space before it."""
        UpperCAmelCase__ : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
        UpperCAmelCase__ : Dict = value

    def __a ( self : str , *snake_case__ : Any , **snake_case__ : Optional[int] ):
        """Batch-encode; pretokenized input requires ``add_prefix_space=True``."""
        UpperCAmelCase__ : Any = kwargs.get("is_split_into_words" , snake_case__ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*snake_case__ , **snake_case__ )

    def __a ( self : List[str] , *snake_case__ : Union[str, Any] , **snake_case__ : List[Any] ):
        """Encode a single input; pretokenized input requires
        ``add_prefix_space=True``."""
        UpperCAmelCase__ : Optional[Any] = kwargs.get("is_split_into_words" , snake_case__ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*snake_case__ , **snake_case__ )

    def __a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
        """Save the tokenizer model files to a directory; returns the paths."""
        UpperCAmelCase__ : Union[str, Any] = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
        return tuple(snake_case__ )

    def __a ( self : str , snake_case__ : List[Any] , snake_case__ : str=None ):
        """Build model inputs with special tokens: ``<s> A </s>`` or
        ``<s> A </s> </s> B </s>`` for pairs."""
        UpperCAmelCase__ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def __a ( self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
        """Return all-zero token-type ids (LED does not use token types)."""
        UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
        UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __a ( self : Any , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
        """Pad as usual, then pad ``global_attention_mask`` with ``-1`` so it
        matches the padded sequence length."""
        UpperCAmelCase__ : str = super()._pad(
            encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
        # Load from model defaults
        if return_attention_mask is None:
            UpperCAmelCase__ : Optional[int] = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            UpperCAmelCase__ : List[Any] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            UpperCAmelCase__ : Any = len(encoded_inputs["global_attention_mask"] ) != len(snake_case__ )
            if needs_to_be_padded:
                UpperCAmelCase__ : List[str] = len(snake_case__ ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    UpperCAmelCase__ : Dict = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    UpperCAmelCase__ : Dict = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 438 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __lowerCamelCase ( UpperCamelCase__ ):
    """Configuration for ESM protein language models (``model_type`` "esm"),
    including optional ESMFold folding-head settings.

    NOTE(review): every local below is assigned to the placeholder name
    ``lowerCAmelCase__`` and ``to_dict`` returns an undefined ``output``;
    this looks like mechanical renaming damage — confirm against the
    upstream ``EsmConfig``.
    """

    snake_case__ = """esm"""  # model_type identifier

    def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : str=768 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : List[str]=12 , SCREAMING_SNAKE_CASE__ : int=3_072 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=1_026 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-1_2 , SCREAMING_SNAKE_CASE__ : int="absolute" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : str , ) -> int:
        """Store transformer hyper-parameters; when ``is_folding_model`` is
        set, also build the ESMFold config and vocabulary."""
        super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , mask_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
        # Standard transformer hyper-parameters.
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = hidden_size
        lowerCAmelCase__ = num_hidden_layers
        lowerCAmelCase__ = num_attention_heads
        lowerCAmelCase__ = intermediate_size
        lowerCAmelCase__ = hidden_dropout_prob
        lowerCAmelCase__ = attention_probs_dropout_prob
        lowerCAmelCase__ = max_position_embeddings
        lowerCAmelCase__ = initializer_range
        lowerCAmelCase__ = layer_norm_eps
        lowerCAmelCase__ = position_embedding_type
        lowerCAmelCase__ = use_cache
        # ESM-specific options.
        lowerCAmelCase__ = emb_layer_norm_before
        lowerCAmelCase__ = token_dropout
        lowerCAmelCase__ = is_folding_model
        if is_folding_model:
            # Folding models need an ESMFold config and an explicit vocabulary.
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                lowerCAmelCase__ = EsmFoldConfig()
            elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
                lowerCAmelCase__ = EsmFoldConfig(**SCREAMING_SNAKE_CASE__ )
            lowerCAmelCase__ = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                lowerCAmelCase__ = get_default_vocab_list()
            else:
                lowerCAmelCase__ = vocab_list
        else:
            lowerCAmelCase__ = None
            lowerCAmelCase__ = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , SCREAMING_SNAKE_CASE__ ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )

    def a ( self : Union[str, Any] ) -> Optional[int]:
        """Serialize to a dict, expanding the nested ESMFold config if present."""
        lowerCAmelCase__ = super().to_dict()
        if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE__ ):
            lowerCAmelCase__ = self.esmfold_config.to_dict()
        return output
@dataclass
class __lowerCamelCase :
    """ESMFold head configuration.

    NOTE(review): all fields below share the placeholder name
    ``snake_case__``, so only the last assignment survives as a dataclass
    field — mechanical renaming damage. Upstream ``EsmFoldConfig`` fields in
    this order are presumably (esm_type, fp16_esm, use_esm_attn_map,
    esm_ablate_pairwise, esm_ablate_sequence, esm_input_dropout, embed_aa,
    bypass_lm, lddt_head_hid_dim, trunk) — verify against upstream before
    relying on these defaults.
    """

    snake_case__ = None
    snake_case__ = True
    snake_case__ = False
    snake_case__ = False
    snake_case__ = False
    snake_case__ = 0
    snake_case__ = True
    snake_case__ = False
    snake_case__ = 1_2_8
    snake_case__ = None

    def a ( self : str ) -> List[Any]:
        """Post-init style hook: coerce a dict ``trunk`` into a TrunkConfig."""
        if self.trunk is None:
            lowerCAmelCase__ = TrunkConfig()
        elif isinstance(self.trunk , SCREAMING_SNAKE_CASE__ ):
            # NOTE(review): SCREAMING_SNAKE_CASE__ is not defined at module
            # scope here; upstream checks isinstance(self.trunk, dict).
            lowerCAmelCase__ = TrunkConfig(**self.trunk )

    def a ( self : Any ) -> Optional[Any]:
        """Serialize to a dict, expanding the nested trunk config."""
        lowerCAmelCase__ = asdict(self )
        lowerCAmelCase__ = self.trunk.to_dict()
        return output
@dataclass
class __lowerCamelCase :
    """ESMFold trunk (folding-block stack) configuration.

    NOTE(review): all fields share the placeholder name ``snake_case__`` —
    renaming damage. Upstream ``TrunkConfig`` fields in this order are
    presumably (num_blocks, sequence_state_dim, pairwise_state_dim,
    sequence_head_width, pairwise_head_width, position_bins, dropout,
    layer_drop, cpu_grad_checkpoint, max_recycles, chunk_size,
    structure_module) — verify against upstream.
    """

    snake_case__ = 4_8
    snake_case__ = 1_0_2_4
    snake_case__ = 1_2_8
    snake_case__ = 3_2
    snake_case__ = 3_2
    snake_case__ = 3_2
    snake_case__ = 0
    snake_case__ = 0
    snake_case__ = False
    snake_case__ = 4
    snake_case__ = 1_2_8
    snake_case__ = None

    def a ( self : Tuple ) -> Dict:
        """Post-init style validation: coerce ``structure_module``, check
        dimension/head-width divisibility, and bound the dropout."""
        if self.structure_module is None:
            lowerCAmelCase__ = StructureModuleConfig()
        elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE__ ):
            lowerCAmelCase__ = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' )
        # NOTE(review): `x % x` is always 0, so the next two checks can never
        # fire — the divisor was presumably meant to be the head width.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f' {self.sequence_state_dim} and {self.sequence_state_dim}.' )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f' {self.pairwise_state_dim} and {self.pairwise_state_dim}.' )
        # Head counts must divide the state dims exactly.
        lowerCAmelCase__ = self.sequence_state_dim // self.sequence_head_width
        lowerCAmelCase__ = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
        if self.dropout >= 0.4:
            raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' )

    def a ( self : Dict ) -> Optional[int]:
        """Serialize to a dict, expanding the nested structure-module config."""
        lowerCAmelCase__ = asdict(self )
        lowerCAmelCase__ = self.structure_module.to_dict()
        return output
@dataclass
class __lowerCamelCase :
    """Structure-module (invariant point attention head) hyper-parameters for
    ESMFold.

    NOTE(review): all fields share the placeholder name ``snake_case__`` —
    renaming damage. Upstream ``StructureModuleConfig`` fields in this order
    are presumably (sequence_dim, pairwise_dim, ipa_dim, resnet_dim,
    num_heads_ipa, num_qk_points, num_v_points, dropout_rate, num_blocks,
    num_transition_layers, num_resnet_blocks, num_angles,
    trans_scale_factor, epsilon, inf) — verify against upstream.
    """

    snake_case__ = 3_8_4
    snake_case__ = 1_2_8
    snake_case__ = 1_6
    snake_case__ = 1_2_8
    snake_case__ = 1_2
    snake_case__ = 4
    snake_case__ = 8
    snake_case__ = 0.1
    snake_case__ = 8
    snake_case__ = 1
    snake_case__ = 2
    snake_case__ = 7
    snake_case__ = 1_0
    snake_case__ = 1E-8
    snake_case__ = 1E5

    def a ( self : Optional[Any] ) -> Any:
        """Serialize this dataclass to a plain dict."""
        return asdict(self )
def _A ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 721 |
import functools
from typing import Any
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : list[str] ):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or len(lowerCAmelCase_ ) == 0:
raise ValueError("the string should be not empty string" )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not all(
isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) > 0 for item in words ):
raise ValueError("the words should be a list of non-empty strings" )
# Build trie
lowerCAmelCase__ = {}
lowerCAmelCase__ = "WORD_KEEPER"
for word in words:
lowerCAmelCase__ = trie
for c in word:
if c not in trie_node:
lowerCAmelCase__ = {}
lowerCAmelCase__ = trie_node[c]
lowerCAmelCase__ = True
lowerCAmelCase__ = len(lowerCAmelCase_ )
# Dynamic programming method
@functools.cache
def is_breakable(lowerCAmelCase_ : int ) -> bool:
if index == len_string:
return True
lowerCAmelCase__ = trie
for i in range(lowerCAmelCase_ , lowerCAmelCase_ ):
lowerCAmelCase__ = trie_node.get(string[i] , lowerCAmelCase_ )
if trie_node is None:
return False
if trie_node.get(lowerCAmelCase_ , lowerCAmelCase_ ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 125 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ ( a_ , unittest.TestCase ):
    """Test suite for the slow and fast BERT tokenizers.

    NOTE(review): every class attribute below is named ``_A`` (obfuscation),
    so only the last assignment survives at runtime; the originals were
    presumably tokenizer_class / rust_tokenizer_class / test_rust_tokenizer /
    space_between_special_tokens / from_pretrained_filter — TODO confirm.
    Likewise all test methods share the name ``UpperCamelCase_`` and shadow
    each other at class-definition time.
    """
    _A : Any = BertTokenizer
    _A : Optional[Any] = BertTokenizerFast
    _A : Union[str, Any] = True
    _A : Union[str, Any] = True
    _A : Tuple = filter_non_english
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        """Write a small WordPiece fixture vocabulary into the temp dir."""
        super().setUp()
        UpperCAmelCase = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def UpperCamelCase_ ( self , snake_case__ ) -> str:
        """Return an (input_text, expected_output_text) pair for round-trips."""
        UpperCAmelCase = """UNwant\u00E9d,running"""
        UpperCAmelCase = """unwanted, running"""
        return input_text, output_text
    def UpperCamelCase_ ( self ) -> int:
        """Tokenization and token->id conversion against the fixture vocab."""
        UpperCAmelCase = self.tokenizer_class(self.vocab_file )
        UpperCAmelCase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [9, 6, 7, 12, 10, 11] )
    def UpperCamelCase_ ( self ) -> str:
        """Slow and fast tokenizers must agree, with and without lower casing."""
        if not self.test_rust_tokenizer:
            return
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase = self.get_rust_tokenizer()
        UpperCAmelCase = """UNwant\u00E9d,running"""
        UpperCAmelCase = tokenizer.tokenize(snake_case__ )
        UpperCAmelCase = rust_tokenizer.tokenize(snake_case__ )
        self.assertListEqual(snake_case__ , snake_case__ )
        UpperCAmelCase = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
        UpperCAmelCase = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
        self.assertListEqual(snake_case__ , snake_case__ )
        UpperCAmelCase = self.get_rust_tokenizer()
        UpperCAmelCase = tokenizer.encode(snake_case__ )
        UpperCAmelCase = rust_tokenizer.encode(snake_case__ )
        self.assertListEqual(snake_case__ , snake_case__ )
        # With lower casing
        UpperCAmelCase = self.get_tokenizer(do_lower_case=snake_case__ )
        UpperCAmelCase = self.get_rust_tokenizer(do_lower_case=snake_case__ )
        UpperCAmelCase = """UNwant\u00E9d,running"""
        UpperCAmelCase = tokenizer.tokenize(snake_case__ )
        UpperCAmelCase = rust_tokenizer.tokenize(snake_case__ )
        self.assertListEqual(snake_case__ , snake_case__ )
        UpperCAmelCase = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
        UpperCAmelCase = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
        self.assertListEqual(snake_case__ , snake_case__ )
        UpperCAmelCase = self.get_rust_tokenizer()
        UpperCAmelCase = tokenizer.encode(snake_case__ )
        UpperCAmelCase = rust_tokenizer.encode(snake_case__ )
        self.assertListEqual(snake_case__ , snake_case__ )
    def UpperCamelCase_ ( self ) -> List[Any]:
        """BasicTokenizer splits CJK characters into individual tokens."""
        UpperCAmelCase = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
    def UpperCamelCase_ ( self ) -> Optional[int]:
        """Lower-casing mode folds case and (by default) strips accents."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=snake_case__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def UpperCamelCase_ ( self ) -> List[Any]:
        """Lower-casing with strip_accents disabled keeps diacritics."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
    def UpperCamelCase_ ( self ) -> Any:
        """Lower-casing with strip_accents enabled removes diacritics."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def UpperCamelCase_ ( self ) -> List[str]:
        """Default lower-casing behavior also strips accents."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=snake_case__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def UpperCamelCase_ ( self ) -> List[Any]:
        """Without lower-casing the original casing is preserved."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=snake_case__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def UpperCamelCase_ ( self ) -> Optional[int]:
        """No lower-casing, strip_accents disabled: text kept verbatim."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def UpperCamelCase_ ( self ) -> Tuple:
        """No lower-casing but strip_accents enabled: diacritics removed only."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        """Tokens listed in never_split must survive tokenization intact."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=snake_case__ , never_split=["""[UNK]"""] )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
    def UpperCamelCase_ ( self ) -> List[str]:
        """Punctuation is split into standalone tokens."""
        UpperCAmelCase = BasicTokenizer()
        UpperCAmelCase = """a\n'll !!to?'d of, can't."""
        UpperCAmelCase = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""]
        self.assertListEqual(tokenizer.tokenize(snake_case__ ) , snake_case__ )
    def UpperCamelCase_ ( self ) -> List[Any]:
        """WordpieceTokenizer greedy longest-match-first with [UNK] fallback."""
        UpperCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        UpperCAmelCase = {}
        for i, token in enumerate(snake_case__ ):
            UpperCAmelCase = i
        UpperCAmelCase = WordpieceTokenizer(vocab=snake_case__ , unk_token="""[UNK]""" )
        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
        self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        """_is_whitespace recognizes space/tab/newline/CR/NBSP only."""
        self.assertTrue(_is_whitespace(""" """ ) )
        self.assertTrue(_is_whitespace("""\t""" ) )
        self.assertTrue(_is_whitespace("""\r""" ) )
        self.assertTrue(_is_whitespace("""\n""" ) )
        self.assertTrue(_is_whitespace("""\u00A0""" ) )
        self.assertFalse(_is_whitespace("""A""" ) )
        self.assertFalse(_is_whitespace("""-""" ) )
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        """_is_control flags control characters but not whitespace/letters."""
        self.assertTrue(_is_control("""\u0005""" ) )
        self.assertFalse(_is_control("""A""" ) )
        self.assertFalse(_is_control(""" """ ) )
        self.assertFalse(_is_control("""\t""" ) )
        self.assertFalse(_is_control("""\r""" ) )
    def UpperCamelCase_ ( self ) -> Tuple:
        """_is_punctuation flags ASCII punctuation but not letters/spaces."""
        self.assertTrue(_is_punctuation("""-""" ) )
        self.assertTrue(_is_punctuation("""$""" ) )
        self.assertTrue(_is_punctuation("""`""" ) )
        self.assertTrue(_is_punctuation(""".""" ) )
        self.assertFalse(_is_punctuation("""A""" ) )
        self.assertFalse(_is_punctuation(""" """ ) )
    def UpperCamelCase_ ( self ) -> Dict:
        """Soft-hyphen-only input should tokenize to an empty list (issue #340)."""
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        # NOTE(review): tokenize(snake_case__) ignores the loop variable `t` —
        # obfuscation damage; the original presumably tokenized `t`.
        self.assertListEqual([tokenizer.tokenize(snake_case__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(snake_case__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
    @slow
    def UpperCamelCase_ ( self ) -> List[str]:
        """build_inputs_with_special_tokens adds [CLS]/[SEP] (ids 101/102)."""
        UpperCAmelCase = self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
        UpperCAmelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=snake_case__ )
        UpperCAmelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=snake_case__ )
        UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case__ )
        UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
        assert encoded_sentence == [1_01] + text + [1_02]
        assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
    def UpperCamelCase_ ( self ) -> int:
        """offset_mapping from the fast tokenizer matches expected char spans."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
                UpperCAmelCase = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                UpperCAmelCase = tokenizer_r.encode_plus(
                    snake_case__ , return_attention_mask=snake_case__ , return_token_type_ids=snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ , )
                UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(snake_case__ , """do_lower_case""" ) else False
                # Expected (offset, token) pairs differ by casing mode.
                UpperCAmelCase = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """A"""),
                        ((1, 2), ""","""),
                        ((3, 5), """na"""),
                        ((5, 6), """##ï"""),
                        ((6, 8), """##ve"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """Allen"""),
                        ((21, 23), """##NL"""),
                        ((23, 24), """##P"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """a"""),
                        ((1, 2), ""","""),
                        ((3, 8), """naive"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """allen"""),
                        ((21, 23), """##nl"""),
                        ((23, 24), """##p"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def UpperCamelCase_ ( self ) -> int:
        """Chinese chars get no ## prefix when tokenize_chinese_chars is on;
        with it off, only the first character stays bare."""
        UpperCAmelCase = ["""的""", """人""", """有"""]
        UpperCAmelCase = """""".join(snake_case__ )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                UpperCAmelCase = True
                UpperCAmelCase = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
                UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
                UpperCAmelCase = tokenizer_p.encode(snake_case__ , add_special_tokens=snake_case__ )
                UpperCAmelCase = tokenizer_r.encode(snake_case__ , add_special_tokens=snake_case__ )
                UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(snake_case__ )
                UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(snake_case__ )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(snake_case__ , snake_case__ )
                self.assertListEqual(snake_case__ , snake_case__ )
                UpperCAmelCase = False
                UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
                UpperCAmelCase = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
                UpperCAmelCase = tokenizer_r.encode(snake_case__ , add_special_tokens=snake_case__ )
                UpperCAmelCase = tokenizer_p.encode(snake_case__ , add_special_tokens=snake_case__ )
                UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(snake_case__ )
                UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(snake_case__ )
                # it is expected that only the first Chinese character is not preceded by "##".
                UpperCAmelCase = [
                    f'''##{token}''' if idx != 0 else token for idx, token in enumerate(snake_case__ )
                ]
                self.assertListEqual(snake_case__ , snake_case__ )
                self.assertListEqual(snake_case__ , snake_case__ )
| 673 |
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase_ : List[str] = False
class UpperCamelCase_ ( unittest.TestCase ):
    """Training-step parity checks between DDPM and DDIM schedulers."""
    def UpperCamelCase_ ( self , snake_case__=32 ) -> Optional[Any]:
        """Create a seeded small UNet2D model plus an SGD optimizer."""
        set_seed(0 )
        UpperCAmelCase = UNetaDModel(sample_size=snake_case__ , in_channels=3 , out_channels=3 )
        UpperCAmelCase = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer
    @slow
    def UpperCamelCase_ ( self ) -> List[Any]:
        """Run 4 training steps under DDPM, then under DDIM, on identical
        seeded batches, and compare the resulting losses."""
        UpperCAmelCase = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=snake_case__ , )
        UpperCAmelCase = DDIMScheduler(
            num_train_timesteps=10_00 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=snake_case__ , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        UpperCAmelCase = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(snake_case__ ) for _ in range(4 )]
        UpperCAmelCase = [torch.randn((4, 3, 32, 32) ).to(snake_case__ ) for _ in range(4 )]
        UpperCAmelCase = [torch.randint(0 , 10_00 , (4,) ).long().to(snake_case__ ) for _ in range(4 )]
        # train with a DDPM scheduler
        UpperCAmelCase , UpperCAmelCase = self.get_model_optimizer(resolution=32 )
        model.train().to(snake_case__ )
        for i in range(4 ):
            optimizer.zero_grad()
            UpperCAmelCase = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            UpperCAmelCase = model(snake_case__ , timesteps[i] ).sample
            UpperCAmelCase = torch.nn.functional.mse_loss(snake_case__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        UpperCAmelCase , UpperCAmelCase = self.get_model_optimizer(resolution=32 )
        model.train().to(snake_case__ )
        for i in range(4 ):
            optimizer.zero_grad()
            UpperCAmelCase = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            UpperCAmelCase = model(snake_case__ , timesteps[i] ).sample
            UpperCAmelCase = torch.nn.functional.mse_loss(snake_case__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # NOTE(review): both allclose checks compare a value with itself —
        # the obfuscation lost the per-scheduler loss lists that were
        # presumably collected and compared here; TODO confirm upstream.
        self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) )
        self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) )
| 673 | 1 |
# Pinned dependency table: maps pip package name -> full version specifier.
# Used by the runtime dependency checks; auto-generated, keep in sync with
# the project's setup.py.
a_ : dict[str, str] = {
    """Pillow""": """Pillow<10.0.0""",
    """accelerate""": """accelerate>=0.20.3""",
    """av""": """av==9.2.0""",
    """beautifulsoup4""": """beautifulsoup4""",
    """black""": """black~=23.1""",
    """codecarbon""": """codecarbon==1.2.0""",
    """cookiecutter""": """cookiecutter==1.7.3""",
    """dataclasses""": """dataclasses""",
    """datasets""": """datasets!=2.5.0""",
    """decord""": """decord==0.6.0""",
    """deepspeed""": """deepspeed>=0.9.3""",
    """diffusers""": """diffusers""",
    """dill""": """dill<0.3.5""",
    """evaluate""": """evaluate>=0.2.0""",
    """fairscale""": """fairscale>0.3""",
    """faiss-cpu""": """faiss-cpu""",
    """fastapi""": """fastapi""",
    """filelock""": """filelock""",
    """flax""": """flax>=0.4.1,<=0.7.0""",
    """ftfy""": """ftfy""",
    """fugashi""": """fugashi>=1.0""",
    """GitPython""": """GitPython<3.1.19""",
    """hf-doc-builder""": """hf-doc-builder>=0.3.0""",
    """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
    """importlib_metadata""": """importlib_metadata""",
    """ipadic""": """ipadic>=1.0.0,<2.0""",
    """isort""": """isort>=5.5.4""",
    """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
    """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
    """jieba""": """jieba""",
    """kenlm""": """kenlm""",
    """keras-nlp""": """keras-nlp>=0.3.1""",
    """librosa""": """librosa""",
    """nltk""": """nltk""",
    """natten""": """natten>=0.14.6""",
    """numpy""": """numpy>=1.17""",
    """onnxconverter-common""": """onnxconverter-common""",
    """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
    """onnxruntime""": """onnxruntime>=1.4.0""",
    """opencv-python""": """opencv-python""",
    """optuna""": """optuna""",
    """optax""": """optax>=0.0.8,<=0.1.4""",
    """packaging""": """packaging>=20.0""",
    """parameterized""": """parameterized""",
    """phonemizer""": """phonemizer""",
    """protobuf""": """protobuf""",
    """psutil""": """psutil""",
    """pyyaml""": """pyyaml>=5.1""",
    """pydantic""": """pydantic<2""",
    """pytest""": """pytest>=7.2.0""",
    """pytest-timeout""": """pytest-timeout""",
    """pytest-xdist""": """pytest-xdist""",
    """python""": """python>=3.8.0""",
    """ray[tune]""": """ray[tune]""",
    """regex""": """regex!=2019.12.17""",
    """requests""": """requests""",
    """rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
    """rjieba""": """rjieba""",
    """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
    """ruff""": """ruff>=0.0.241,<=0.0.259""",
    """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
    """sacremoses""": """sacremoses""",
    """safetensors""": """safetensors>=0.3.1""",
    """sagemaker""": """sagemaker>=2.31.0""",
    """scikit-learn""": """scikit-learn""",
    """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
    """sigopt""": """sigopt""",
    """starlette""": """starlette""",
    """sudachipy""": """sudachipy>=0.6.6""",
    """sudachidict_core""": """sudachidict_core>=20220729""",
    """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
    """tensorflow""": """tensorflow>=2.6,<2.14""",
    """tensorflow-text""": """tensorflow-text<2.14""",
    """tf2onnx""": """tf2onnx""",
    """timeout-decorator""": """timeout-decorator""",
    """timm""": """timm""",
    """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
    """torch""": """torch>=1.9,!=1.12.0""",
    """torchaudio""": """torchaudio""",
    """torchvision""": """torchvision""",
    """pyctcdecode""": """pyctcdecode>=0.4.0""",
    """tqdm""": """tqdm>=4.27""",
    """unidic""": """unidic>=1.0.2""",
    """unidic_lite""": """unidic_lite>=1.0.7""",
    """urllib3""": """urllib3<2.0.0""",
    """uvicorn""": """uvicorn""",
}
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the VAN model package.
a_ : int = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this list is rebound to `a_` instead of being stored under
    # an import-structure key — obfuscation damage; the original presumably
    # did _import_structure["modeling_van"] = [...]. TODO confirm.
    a_ : Dict = [
        'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VanForImageClassification',
        'VanModel',
        'VanPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is never defined in this module (it
    # was renamed to `a_` above) — this line would raise NameError as-is.
    a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Core runtime dependencies hard-checked at import time; order matters
# (tqdm must be verified before tokenizers).
a__: Union[str, Any] = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
# NOTE(review): the list above is bound to `a__`, but the code below reads
# `pkgs_to_check_at_runtime` — obfuscation damage; they were one name.
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available
            if not is_tokenizers_available():
                continue # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCamelCase__( pkg : Any , hint : Any=None )->None:
    """Enforce the pinned version requirement for a single dependency.

    Fixes the obfuscated original: its two parameters shared one name
    (``UpperCamelCase__`` twice — a SyntaxError), and the body read the
    module-level loop variable ``pkg`` while passing the hint into the
    wrong positional slot.

    Args:
        pkg: key into the ``deps`` pin table (e.g. ``"tqdm"``).
        hint: optional extra message shown if the requirement check fails.
    """
    require_version(deps[pkg] , hint )
| 190 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """SamProcessor tests with the PyTorch backend.

    NOTE(review): all methods share the obfuscated name ``UpperCamelCase``,
    so later defs shadow earlier ones at class-definition time; the
    originals presumably had distinct setUp/test_* names — TODO confirm.
    """
    def UpperCamelCase ( self ):
        # Persist a default SamProcessor into a fresh temp dir.
        A__ = tempfile.mkdtemp()
        A__ = SamImageProcessor()
        A__ = SamProcessor(__lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )
    def UpperCamelCase ( self,**__lowerCamelCase ):
        # Reload the image processor from the temp dir, forwarding kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
    def UpperCamelCase ( self ):
        # Remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )
    def UpperCamelCase ( self ):
        # Build a single random 30x400 RGB PIL image.
        A__ = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
        A__ = [Image.fromarray(np.moveaxis(__lowerCamelCase,0,-1 ) ) for x in image_inputs]
        return image_inputs
    def UpperCamelCase ( self ):
        # Saving then reloading with extra kwargs must preserve the config.
        A__ = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        A__ = self.get_image_processor(do_normalize=__lowerCamelCase,padding_value=1.0 )
        A__ = SamProcessor.from_pretrained(self.tmpdirname,do_normalize=__lowerCamelCase,padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor,__lowerCamelCase )
    def UpperCamelCase ( self ):
        # Processor features must match the bare image processor's output.
        A__ = self.get_image_processor()
        A__ = SamProcessor(image_processor=__lowerCamelCase )
        A__ = self.prepare_image_inputs()
        A__ = image_processor(__lowerCamelCase,return_tensors='''np''' )
        A__ = processor(images=__lowerCamelCase,return_tensors='''np''' )
        input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1E-2 )
    @require_torch
    def UpperCamelCase ( self ):
        # post_process_masks should upscale masks to the original image size,
        # accept torch tensors or numpy arrays, and reject malformed sizes.
        A__ = self.get_image_processor()
        A__ = SamProcessor(image_processor=__lowerCamelCase )
        A__ = [torch.ones((1, 3, 5, 5) )]
        A__ = [[1764, 2646]]
        A__ = [[683, 1024]]
        A__ = processor.post_process_masks(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
        self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
        A__ = processor.post_process_masks(
            __lowerCamelCase,torch.tensor(__lowerCamelCase ),torch.tensor(__lowerCamelCase ) )
        self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
        # should also work with np
        A__ = [np.ones((1, 3, 5, 5) )]
        A__ = processor.post_process_masks(__lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ) )
        self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
        A__ = [[1, 0], [0, 1]]
        with self.assertRaises(__lowerCamelCase ):
            A__ = processor.post_process_masks(__lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """SamProcessor tests with the TensorFlow backend (mirrors the PyTorch
    suite above; methods share the obfuscated name ``UpperCamelCase``)."""
    def UpperCamelCase ( self ):
        # Persist a default SamProcessor into a fresh temp dir.
        A__ = tempfile.mkdtemp()
        A__ = SamImageProcessor()
        A__ = SamProcessor(__lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )
    def UpperCamelCase ( self,**__lowerCamelCase ):
        # Reload the image processor from the temp dir, forwarding kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
    def UpperCamelCase ( self ):
        # Remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )
    def UpperCamelCase ( self ):
        # Build a single random 30x400 RGB PIL image.
        A__ = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
        A__ = [Image.fromarray(np.moveaxis(__lowerCamelCase,0,-1 ) ) for x in image_inputs]
        return image_inputs
    def UpperCamelCase ( self ):
        # Saving then reloading with extra kwargs must preserve the config.
        A__ = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        A__ = self.get_image_processor(do_normalize=__lowerCamelCase,padding_value=1.0 )
        A__ = SamProcessor.from_pretrained(self.tmpdirname,do_normalize=__lowerCamelCase,padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor,__lowerCamelCase )
    def UpperCamelCase ( self ):
        # Processor features must match the bare image processor's output.
        A__ = self.get_image_processor()
        A__ = SamProcessor(image_processor=__lowerCamelCase )
        A__ = self.prepare_image_inputs()
        A__ = image_processor(__lowerCamelCase,return_tensors='''np''' )
        A__ = processor(images=__lowerCamelCase,return_tensors='''np''' )
        input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1E-2 )
    @require_tf
    def UpperCamelCase ( self ):
        # post_process_masks should upscale masks to the original image size,
        # accept tf tensors or numpy arrays, and reject malformed sizes.
        A__ = self.get_image_processor()
        A__ = SamProcessor(image_processor=__lowerCamelCase )
        A__ = [tf.ones((1, 3, 5, 5) )]
        A__ = [[1764, 2646]]
        A__ = [[683, 1024]]
        A__ = processor.post_process_masks(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,return_tensors='''tf''' )
        self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
        A__ = processor.post_process_masks(
            __lowerCamelCase,tf.convert_to_tensor(__lowerCamelCase ),tf.convert_to_tensor(__lowerCamelCase ),return_tensors='''tf''',)
        self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
        # should also work with np
        A__ = [np.ones((1, 3, 5, 5) )]
        A__ = processor.post_process_masks(
            __lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ),return_tensors='''tf''' )
        self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
        A__ = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            A__ = processor.post_process_masks(
                __lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ),return_tensors='''tf''' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Cross-framework equivalence tests: SamProcessor must produce the same
    numerical results through the PyTorch and TensorFlow paths."""
    def UpperCamelCase ( self ):
        # Persist a default SamProcessor into a fresh temp dir.
        A__ = tempfile.mkdtemp()
        A__ = SamImageProcessor()
        A__ = SamProcessor(__lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )
    def UpperCamelCase ( self,**__lowerCamelCase ):
        # Reload the image processor from the temp dir, forwarding kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
    def UpperCamelCase ( self ):
        # Remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )
    def UpperCamelCase ( self ):
        # Build a single random 30x400 RGB PIL image.
        A__ = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
        A__ = [Image.fromarray(np.moveaxis(__lowerCamelCase,0,-1 ) ) for x in image_inputs]
        return image_inputs
    @is_pt_tf_cross_test
    def UpperCamelCase ( self ):
        # post_process_masks must be numerically identical for pt and tf inputs.
        A__ = self.get_image_processor()
        A__ = SamProcessor(image_processor=__lowerCamelCase )
        A__ = np.random.randint(0,2,size=(1, 3, 5, 5) ).astype(np.floataa )
        A__ = [tf.convert_to_tensor(__lowerCamelCase )]
        A__ = [torch.tensor(__lowerCamelCase )]
        A__ = [[1764, 2646]]
        A__ = [[683, 1024]]
        A__ = processor.post_process_masks(
            __lowerCamelCase,__lowerCamelCase,__lowerCamelCase,return_tensors='''tf''' )
        A__ = processor.post_process_masks(
            __lowerCamelCase,__lowerCamelCase,__lowerCamelCase,return_tensors='''pt''' )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
    @is_pt_tf_cross_test
    def UpperCamelCase ( self ):
        # pixel_values must match between the bare image processor and the
        # processor, and between the pt and tf return paths.
        A__ = self.get_image_processor()
        A__ = SamProcessor(image_processor=__lowerCamelCase )
        A__ = self.prepare_image_inputs()
        A__ = image_processor(__lowerCamelCase,return_tensors='''pt''' )['''pixel_values'''].numpy()
        A__ = processor(images=__lowerCamelCase,return_tensors='''pt''' )['''pixel_values'''].numpy()
        A__ = image_processor(__lowerCamelCase,return_tensors='''tf''' )['''pixel_values'''].numpy()
        A__ = processor(images=__lowerCamelCase,return_tensors='''tf''' )['''pixel_values'''].numpy()
        self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
        self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
        self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
| 190 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the MRA model package.
snake_case : Optional[Any] = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this list is rebound to `snake_case` instead of being
    # stored under an import-structure key — obfuscation damage; original
    # presumably did _import_structure["modeling_mra"] = [...]. TODO confirm.
    snake_case : List[Any] = [
        '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MraForMaskedLM''',
        '''MraForMultipleChoice''',
        '''MraForQuestionAnswering''',
        '''MraForSequenceClassification''',
        '''MraForTokenClassification''',
        '''MraLayer''',
        '''MraModel''',
        '''MraPreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is never defined in this module (it
    # was renamed to `snake_case` above) — this line would raise NameError.
    snake_case : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 |
from sklearn.metrics import f1_score  # was `fa_score`, which does not exist in sklearn

import datasets


_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case_ (datasets.Metric):
    """F1 metric backed by `sklearn.metrics.f1_score`."""

    def _info(self):
        """Describe the metric: inputs, docs and references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('int32')),
                    'references': datasets.Sequence(datasets.Value('int32')),
                }
                if self.config_name == 'multilabel'
                else {
                    'predictions': datasets.Value('int32'),
                    'references': datasets.Value('int32'),
                }
            ),
            reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Compute the F1 score; returns a dict with key "f1"."""
        # sklearn's signature is f1_score(y_true, y_pred, ...): references first.
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        # `average=None` yields a per-class array; otherwise a scalar.
        return {"f1": float(score) if score.size == 1 else score}
| 657 | 1 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase = 6008_5147_5143 ) -> int:
"""simple docstring"""
try:
__UpperCAmelCase : Dict = int(UpperCamelCase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Optional[int] = 2
while i * i <= n:
while n % i == 0:
__UpperCAmelCase : Union[str, Any] = i
n //= i
i += 1
if n > 1:
__UpperCAmelCase : List[str] = n
return int(UpperCamelCase )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 77 |
'''P-Series generator: 1 + 1/2^p + 1/3^p + ... + 1/n^p.'''
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first *nth_term* terms of the P-series as display strings.

    The first term is "1"; each later term k (1-based) is "1 / k^power".
    An empty-string *nth_term* yields [""] (legacy CLI behaviour).
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


# Backward-compatible alias for the previous (obfuscated) name.
__a = p_series

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
| 18 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq OPT checkpoint and normalize its state dict to HF naming.

    - unwraps the "model" sub-dict if present
    - drops keys with no HF equivalent
    - renames projection / final-layer-norm keys
    - splits each fused ``qkv_proj`` tensor into separate q/k/v projections
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    # Snapshot the keys: we mutate sd while iterating.
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            q, k, v = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq OPT checkpoint into a HF `OPTModel` and save it.

    *config* may be a path/identifier for `OPTConfig.from_pretrained`; when
    None, the default OPT configuration is used.
    """
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()

    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 713 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_ ( ProcessorMixin ):
    """Processor that bundles a ViLT image processor and a BERT tokenizer.

    Text is tokenized and image features are computed, then both encodings
    are merged into a single `BatchEncoding`.
    """

    # Names used by ProcessorMixin to wire up the sub-processors.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated `feature_extractor` argument if given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize `text` and extract `pixel_values`/`pixel_mask` from `images`."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, de-duplicated in order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
import math


def UpperCAmelCase_(initial_intensity: float, angle: float) -> float:
    """Apply Malus's law: transmitted intensity = I0 * cos^2(angle).

    Args:
        initial_intensity: incident light intensity (must be >= 0).
        angle: polarizer angle in degrees, in [0, 360].

    Raises:
        ValueError: on negative intensity or an angle outside 0-360.
    """
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name='malus_law')
from graphs.minimum_spanning_tree_kruskal import kruskal
def _SCREAMING_SNAKE_CASE() -> None:
    """Regression test: Kruskal's MST on a fixed 9-node weighted graph.

    Edges are given as [u, v, weight]; the expected MST is compared as a
    sorted list because edge ordering is implementation-defined.
    """
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(result) == sorted(expected)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps each submodule to the public names it exposes; consumed by _LazyModule below.
# NOTE: the original clobbered this dict with the modeling list and referenced an
# undefined `_import_structure` at the _LazyModule call — fixed here.
_import_structure = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Without torch, the modeling classes cannot be imported.
    pass
else:
    _import_structure['modeling_biogpt'] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 702 |
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


# nltk >= 3.6.5 requires pre-tokenized input; detect the installed version once.
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
    from nltk import word_tokenize


_CITATION = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""

_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.

METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""

_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
    gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
    'meteor': meteor score.
Examples:

    >>> meteor = datasets.load_metric('meteor')
    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    >>> results = meteor.compute(predictions=predictions, references=references)
    >>> print(round(results["meteor"], 4))
    0.6944
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
    """METEOR metric backed by `nltk.translate.meteor_score`."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """Fetch the NLTK corpora METEOR needs (wordnet, and tokenizer data on newer nltk)."""
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        """Compute the mean METEOR score over (reference, prediction) pairs."""
        if NLTK_VERSION >= version.Version("3.6.5"):
            # Newer nltk expects pre-tokenized hypotheses/references.
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 96 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowerCamelCase__(__lowerCamelCase):
    """Decorate a module's ``forward``-style method for accelerate CPU offload.

    If accelerate >= 0.17.0 is installed, the returned wrapper runs the
    module's ``_hf_hook.pre_forward`` hook (when present) before invoking
    the wrapped method, so offloaded weights are moved onto the execution
    device first. Otherwise the method is returned unchanged.
    """
    method = __lowerCamelCase
    if not is_accelerate_available():
        return method

    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        # Older accelerate has no pre_forward hook protocol.
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class __magic_name__ ( BaseImageProcessor ):
    """Image processor that resizes images down to the nearest multiple of
    ``size_divisor`` and rescales pixel values to [0, 1]."""

    model_input_names = ['pixel_values']

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs) -> None:
        """Store the default preprocessing options.

        Args:
            do_resize: whether to shrink images to a multiple of `size_divisor`.
            size_divisor: both output dimensions become multiples of this value.
            resample: PIL resampling filter used when resizing.
            do_rescale: whether to scale pixel values by 1/255.
        """
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        """Resize *image* so both dimensions are multiples of *size_divisor*."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Prepare one image or a list of images for the model.

        Per-call arguments override the instance defaults set in __init__.
        Returns a `BatchFeature` holding "pixel_values".
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 323 | 0 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 666 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all knight moves from *position* that stay on an n x n board."""
    y, x = position
    candidates = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for candidate in candidates:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Return True when every square of *board* has been visited (no zeros left)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking step: try to extend the tour from *pos* at move number *curr*."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # Undo the move before trying the next candidate.
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board.

    Returns a board whose entries are the 1-based visit order; raises
    ValueError when no tour exists for the given size.
    """
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 666 | 1 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
__lowerCamelCase = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
__lowerCamelCase = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> 
print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
__lowerCamelCase = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
def lowerCamelCase__ ( self : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[Any]=None , __snake_case : Dict=False , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=False , ) -> Tuple:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__magic_name__: Optional[int] = np.array([re.sub(_a , """""" , _a ) for x in predictions] )
__magic_name__: Optional[Any] = np.array([re.sub(_a , """""" , _a ) for x in references] )
else:
__magic_name__: str = np.asarray(_a )
__magic_name__: str = np.asarray(_a )
if ignore_case:
__magic_name__: Optional[int] = np.char.lower(_a )
__magic_name__: Optional[int] = np.char.lower(_a )
if ignore_punctuation:
__magic_name__: Optional[int] = string.punctuation.maketrans("""""" , """""" , string.punctuation )
__magic_name__: Any = np.char.translate(_a , table=_a )
__magic_name__: Optional[Any] = np.char.translate(_a , table=_a )
if ignore_numbers:
__magic_name__: List[Any] = string.digits.maketrans("""""" , """""" , string.digits )
__magic_name__: Union[str, Any] = np.char.translate(_a , table=_a )
__magic_name__: List[str] = np.char.translate(_a , table=_a )
__magic_name__: Any = predictions == references
return {"exact_match": np.mean(_a ) * 1_0_0}
| 96 |
"""simple docstring"""
from math import factorial
def _lowercase ( __lowerCAmelCase = 100 ) -> int:
return sum(int(__lowerCAmelCase ) for x in str(factorial(__lowerCAmelCase ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 680 | 0 |
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2

# Backward-compatible alias for the previous (obfuscated) constant name.
A_ = COULOMBS_CONSTANT


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law F = k * |q1*q2| / d^2 for the single unknown.

    Exactly one of the four arguments must be 0; that quantity is computed
    from the other three and returned as a one-entry dict keyed by its name.

    Raises:
        ValueError: if not exactly one argument is 0, or distance is negative.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


# Backward-compatible alias for the previous (obfuscated) function name.
_lowerCAmelCase = coulombs_law

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 721 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# The original bound the logger and the archive map to the same name (`A_`),
# silently discarding the logger; give the logger its own name.
logger = logging.get_logger(__name__)

A_ = {
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration for RoCBert models.

    Mirrors BERT's configuration and adds RoCBert-specific options for the
    pronunciation and shape embeddings used by its robust tokenization.
    """

    # Required by PretrainedConfig for auto-class resolution.
    model_type = 'roc_bert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        """Store all hyper-parameters; unknown kwargs are forwarded to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific embedding options.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 498 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( UpperCamelCase__ , unittest.TestCase):
    """Fast (tiny-weights, CPU) tests for ``KandinskyImgaImgPipeline``.

    NOTE(review): this file has been through an automated identifier rewrite.
    Assignment targets (``lowerCamelCase : Dict = ...``) no longer match the
    names read afterwards (``tokenizer``, ``model``, ``inputs`` ...), every
    property below shares the name ``UpperCamelCase__`` (later definitions
    shadow earlier ones), and method parameters (``__magic_name__``) differ
    from the ``SCREAMING_SNAKE_CASE__`` names used in the bodies.  The code
    is preserved byte-for-byte here; restore it against the upstream
    diffusers test file before running.
    """

    # Pipeline under test and the argument lists checked by the shared tester mixin.
    _UpperCAmelCase : str = KandinskyImgaImgPipeline
    _UpperCAmelCase : Optional[int] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
    _UpperCAmelCase : Union[str, Any] = [
        """prompt""",
        """negative_prompt""",
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
    ]
    _UpperCAmelCase : List[str] = [
        """generator""",
        """height""",
        """width""",
        """strength""",
        """guidance_scale""",
        """negative_prompt""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    _UpperCAmelCase : List[Any] = False

    # Dummy model dimensions — kept tiny so the test runs on CPU in seconds.
    @property
    def UpperCamelCase__ ( self ):
        return 3_2

    @property
    def UpperCamelCase__ ( self ):
        return 3_2

    @property
    def UpperCamelCase__ ( self ):
        return self.time_input_dim

    @property
    def UpperCamelCase__ ( self ):
        return self.time_input_dim * 4

    @property
    def UpperCamelCase__ ( self ):
        return 1_0_0

    # Tiny tokenizer / text-encoder / UNet / VQ model used to assemble the pipeline.
    @property
    def UpperCamelCase__ ( self ):
        lowerCamelCase : Dict = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
        return tokenizer

    @property
    def UpperCamelCase__ ( self ):
        torch.manual_seed(0 )
        lowerCamelCase : Optional[int] = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
        lowerCamelCase : Tuple = MultilingualCLIP(SCREAMING_SNAKE_CASE__ )
        lowerCamelCase : Optional[Any] = text_encoder.eval()
        return text_encoder

    @property
    def UpperCamelCase__ ( self ):
        torch.manual_seed(0 )
        lowerCamelCase : Union[str, Any] = {
            """in_channels""": 4,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """text_image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """text_image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__ )
        return model

    @property
    def UpperCamelCase__ ( self ):
        return {
            "block_out_channels": [3_2, 6_4],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def UpperCamelCase__ ( self ):
        torch.manual_seed(0 )
        lowerCamelCase : Any = VQModel(**self.dummy_movq_kwargs )
        return model

    # Assemble the full pipeline component dict from the dummy parts above.
    def UpperCamelCase__ ( self ):
        lowerCamelCase : List[str] = self.dummy_text_encoder
        lowerCamelCase : Any = self.dummy_tokenizer
        lowerCamelCase : Union[str, Any] = self.dummy_unet
        lowerCamelCase : Dict = self.dummy_movq
        lowerCamelCase : List[Any] = {
            """num_train_timesteps""": 1_0_0_0,
            """beta_schedule""": """linear""",
            """beta_start""": 0.00_085,
            """beta_end""": 0.012,
            """clip_sample""": False,
            """set_alpha_to_one""": False,
            """steps_offset""": 0,
            """prediction_type""": """epsilon""",
            """thresholding""": False,
        }
        lowerCamelCase : Dict = DDIMScheduler(**SCREAMING_SNAKE_CASE__ )
        lowerCamelCase : str = {
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    # Seeded call kwargs (embeddings + a 256x256 RGB init image) for the pipeline.
    def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=0 ):
        lowerCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
        lowerCamelCase : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(SCREAMING_SNAKE_CASE__ )
        # create init_image
        lowerCamelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
        lowerCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase : Optional[Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
        if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
            lowerCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
        else:
            lowerCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
        lowerCamelCase : List[str] = {
            """prompt""": """horse""",
            """image""": init_image,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 6_4,
            """width""": 6_4,
            """num_inference_steps""": 1_0,
            """guidance_scale""": 7.0,
            """strength""": 0.2,
            """output_type""": """np""",
        }
        return inputs

    # Run the pipeline on CPU and compare a 3x3 corner slice against golden values.
    def UpperCamelCase__ ( self ):
        lowerCamelCase : List[str] = """cpu"""
        lowerCamelCase : Union[str, Any] = self.get_dummy_components()
        lowerCamelCase : Optional[int] = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
        lowerCamelCase : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE__ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        lowerCamelCase : Optional[int] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
        lowerCamelCase : List[str] = output.images
        lowerCamelCase : Any = pipe(
            **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
        lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
        lowerCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        lowerCamelCase : int = np.array(
            [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
    """Full-checkpoint integration test for Kandinsky img2img.

    Requires a GPU and network access (downloads checkpoints and reference
    images from the Hub).  NOTE(review): automated renaming left both methods
    named ``UpperCamelCase__`` (the second shadows the tearDown-like first)
    and the local assignment targets do not match the names read afterwards
    (``pipe_prior``, ``pipeline``, ``generator`` ...).  Preserved byte-for-byte.
    """

    def UpperCamelCase__ ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase__ ( self ):
        # Golden output produced by a previous known-good run.
        lowerCamelCase : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_img2img_frog.npy""" )
        lowerCamelCase : str = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        lowerCamelCase : List[Any] = """A red cartoon frog, 4k"""
        lowerCamelCase : List[str] = KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(SCREAMING_SNAKE_CASE__ )
        lowerCamelCase : Optional[Any] = KandinskyImgaImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
        lowerCamelCase : Any = pipeline.to(SCREAMING_SNAKE_CASE__ )
        pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        lowerCamelCase , lowerCamelCase : Optional[int] = pipe_prior(
            SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        lowerCamelCase : Union[str, Any] = pipeline(
            SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , image_embeds=SCREAMING_SNAKE_CASE__ , negative_image_embeds=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="""np""" , )
        lowerCamelCase : Optional[Any] = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 681 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ):
    """Unit tests for the slow and fast XLNet tokenizers.

    NOTE(review): this file went through an automated identifier rewrite.
    All four class attributes are assigned to the same name ``snake_case__``
    (later assignments shadow earlier ones), every test method is named ``a``
    (only the last definition survives), and local assignment targets
    (``lowerCAmelCase__``) do not match the names read afterwards
    (``tokenizer``, ``vocab_keys`` ...) nor the ``SCREAMING_SNAKE_CASE__``
    placeholders passed as arguments.  Code preserved byte-for-byte; restore
    against the upstream transformers test file before running.
    """

    snake_case__ = XLNetTokenizer
    snake_case__ = XLNetTokenizerFast
    snake_case__ = True
    snake_case__ = True

    def a ( self : str ) -> str:
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname )

    # Token <-> id round trip for a special token.
    def a ( self : List[str] ) -> List[Any]:
        lowerCAmelCase__ = "<s>"
        lowerCAmelCase__ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )

    # Fixture vocabulary layout and size.
    def a ( self : Union[str, Any] ) -> str:
        lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "<eod>" )
        self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 1_006 )

    def a ( self : int ) -> Dict:
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )

    # Tokenization with accents kept: unknown pieces map to <unk> on decode.
    def a ( self : List[str] ) -> Any:
        lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
        self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [285, 46, 10, 170, 382] )
        lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE__ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
        self.assertListEqual(SCREAMING_SNAKE_CASE__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
        lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE__ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

    # Lowercasing mode strips case and accents.
    def a ( self : Optional[int] ) -> Optional[Any]:
        lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE__ , [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ] , )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )

    # Case-preserving mode still folds the accent on "falsé".
    def a ( self : List[Any] ) -> Optional[int]:
        lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE__ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ] , )

    # Special-token layout: [4, 3] closes a single sequence, [4] separates a pair.
    @slow
    def a ( self : Any ) -> Any:
        lowerCAmelCase__ = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
        lowerCAmelCase__ = tokenizer.encode("sequence builders" , add_special_tokens=SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ = tokenizer.encode("multi-sequence build" , add_special_tokens=SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]

    # Golden full-encoding fixture for the released checkpoint.
    @slow
    def a ( self : Union[str, Any] ) -> Any:
        # fmt: off
        lowerCAmelCase__ = {"input_ids": [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
| 61 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy import plumbing: heavy framework-specific submodules are only imported
# when actually accessed (or at type-checking time).  The previous revision
# collapsed every module-level name to `__a`, so `_import_structure` was
# undefined where it was used, the optional model lists were never attached,
# the `_LazyModule` was never installed into `sys.modules`, and the final
# line carried trailing garbage that broke the syntax.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch not installed: simply don't expose the PyTorch model.
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Sample graphs for the Kosaraju SCC routines below.  The previous revision
# bound both to the same mangled name (the second shadowed the first) and
# used an undefined `List` annotation.
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """DFS from ``vert``; return vertices in finish order (``vert`` appended last).

    ``visited`` is shared across calls and mutated in place.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from ``vert`` in the reversed graph.

    Starting from an SCC root this yields exactly one strongly connected
    component.  ``visited`` is mutated in place.
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: return the SCCs of ``graph`` (vertices 0..n-1)."""
    visited = len(graph) * [False]
    # Build the transpose of the graph.
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    # First pass: DFS finish order on the original graph.
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    # Second pass: DFS on the transpose in reverse finish order;
    # each unvisited start vertex yields one SCC.
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Return the strongly connected components of adjacency-list graph ``g``.

    Tarjan's algorithm: each vertex gets a discovery index and a low-link
    (lowest index reachable from it); a vertex whose low-link equals its own
    index is the root of an SCC, which is then popped off the shared stack.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]  # -1 means "not yet visited"
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            # v is an SCC root: pop the whole component off the stack.
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components
def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """Build a directed adjacency list with ``n`` vertices from (u, v) pairs."""
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
SCREAMING_SNAKE_CASE__ = 7
SCREAMING_SNAKE_CASE__ = [0, 0, 1, 2, 3, 3, 4, 4, 6]
SCREAMING_SNAKE_CASE__ = [1, 3, 2, 0, 1, 4, 5, 6, 5]
SCREAMING_SNAKE_CASE__ = [(u, v) for u, v in zip(source, target)]
SCREAMING_SNAKE_CASE__ = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 9 |
"""Rabin-Karp substring search using a rolling hash."""

# Strings are hashed as base-`alphabet_size` numbers of their code points.
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True iff ``pattern`` occurs in ``text``.

    Hashes the pattern and a sliding window of the text; a hash match is only
    a candidate and is confirmed with a direct string comparison, so hash
    collisions cannot produce false positives.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        # modulus_power ends up as alphabet_size**(p_len - 1) % modulus.
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """Self-test covering matches, misses and non-ASCII input."""
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 372 | 0 |
"""Backtracking sudoku solver with two sample grids."""

# A 9x9 sudoku grid; 0 marks an empty cell.
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: list[list[int]], row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at (row, column) without conflict."""
    # Row and column check.
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    # 3x3 box check: (row - row % 3, column - column % 3) is the box origin.
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: list[list[int]]) -> "tuple[int, int] | None":
    """Return the first (row, column) containing 0, scanning row-major; None if full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: list[list[int]]) -> "list[list[int]] | None":
    """Solve ``grid`` in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Undo the placement and try the next digit (backtrack).
            grid[row][column] = 0
    return None
def print_solution(grid: list[list[int]]) -> None:
    """Pretty-print the grid, one row per line, cells separated by spaces."""
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
UpperCAmelCase__ : Union[str, Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 704 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered sequences drawn from ``array`` (with repetition) summing to target.

    Plain exponential recursion; ``n`` (== len(array)) is kept for interface
    parity with the memoised and bottom-up variants.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            # Overshot: this branch contributes nothing.
            return 0
        if target == 0:
            # Exact sum reached: exactly one valid sequence.
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count as ``combination_sum_iv`` but memoised on the remaining target."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            # Already computed for this remaining target.
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    # -1 marks "not computed yet".
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Iterative DP: dp_array[i] = number of ordered combinations summing to i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # the empty combination sums to 0
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : Optional[int] = 5
UpperCAmelCase__ : Tuple = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 676 | 0 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class __A ( nn.Module ):
    """Two parallel Transformer2D branches whose outputs are blended by ``mix_ratio``.

    Each branch encodes one slice of ``encoder_hidden_states`` (lengths given
    by ``condition_lengths``); ``transformer_index_for_condition`` maps each
    condition slice to a branch.

    NOTE(review): automated renaming left every parameter in both signatures
    named ``A`` (a duplicate-argument SyntaxError) and assignment targets
    (``_a``) that no longer match the attributes/locals read afterwards
    (``self.transformers``, ``input_states`` ...).  Code preserved
    byte-for-byte; restore against the upstream diffusers source.
    """

    def __init__(self , A = 16 , A = 88 , A = None , A = 1 , A = 0.0 , A = 32 , A = None , A = False , A = None , A = None , A = "geglu" , A = None , ) -> Union[str, Any]:
        """Build two identically-configured Transformer2DModel branches."""
        super().__init__()
        _a = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=A , attention_head_dim=A , in_channels=A , num_layers=A , dropout=A , norm_num_groups=A , cross_attention_dim=A , attention_bias=A , sample_size=A , num_vector_embeds=A , activation_fn=A , num_embeds_ada_norm=A , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        _a = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        _a = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        _a = [1, 0]

    def a__ (self , A , A , A=None , A=None , A=None , A = True , ) -> int:
        """Run both branches on their condition slices and mix the residuals."""
        _a = hidden_states
        _a = []
        _a = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            _a = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            _a = self.transformer_index_for_condition[i]
            _a = self.transformers[transformer_index](
                A , encoder_hidden_states=A , timestep=A , cross_attention_kwargs=A , return_dict=A , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        _a = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        _a = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=A )
| 11 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCamelCase(TestCase):
    """Static code-quality checks over every dataset script under ./datasets.

    The previous revision inherited from an undefined mangled name instead of
    ``unittest.TestCase`` and gave all four methods the same name, so only
    the last one survived and none were collected as tests.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a regex match if the file calls open() without an explicit
        encoding or binary/write mode, else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return a match for a bare print( call that is not inside a comment
        or a triple-quoted string, else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            relevant_matches = [match for match in matches if match is not None and match.group(1) is not None]
        return relevant_matches[0] if relevant_matches else None

    def test_no_encoding_on_all_datasets(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
# Resolve the repo's `src` directory and put it on sys.path so the in-repo
# `transformers` package is importable (the conftest would normally do this).
# Fix: the original assigned to a mangled name but then read `git_repo_path`,
# which was undefined (NameError at import time).
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
# Tiny wav2vec2 checkpoints used by the sub-tests, keyed by variant name.
# Fix: all four module constants were mangled to the same reassigned name and
# the list referenced the undefined `ZEROa`; later code reads `models` /
# `stages`, so those names are restored here.
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
ZERO2 = "zero2"  # DeepSpeed ZeRO stage-2 config suffix
ZERO3 = "zero3"  # DeepSpeed ZeRO stage-3 config suffix
stages = [ZERO2, ZERO3]
def _UpperCAmelCase(func, param_num, param):
    """`parameterized.expand` name_func: append the safe-encoded params to the test name.

    Fixes: the original signature repeated one mangled parameter name (a
    SyntaxError), stringified the wrong object inside the join, and returned
    the never-assigned `param_based_name`. `param_num` is required by the
    name_func contract but intentionally unused.
    """
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
# Fix: the result was stored under a reused mangled name; the
# `@parameterized.expand` decorators need it as the module-level `params`.
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class a ( __UpperCAmelCase ):
    """DeepSpeed ZeRO-2/ZeRO-3 integration tests for the wav2vec2 `run_asr.py`
    research example.

    NOTE(review): this block is heavily name-mangled and does not run as-is —
    the base class `__UpperCAmelCase` is undefined (presumably `TestCasePlus`),
    every method shares the name `UpperCAmelCase__` so later defs shadow
    earlier ones, the repeated `snake_case__` parameter names are a
    SyntaxError, and decorator arguments reference the undefined
    `snake_case__` (presumably the module-level `params` list and the custom
    name_func). Restore from the upstream transformers test before use.
    """

    @parameterized.expand(snake_case__ , name_func=snake_case__ )
    def UpperCAmelCase__ ( self : Dict , snake_case__ : Any , snake_case__ : Any ):
        """simple docstring"""
        self.run_and_check(
            stage=snake_case__ , model=snake_case__ , distributed=snake_case__ , fpaa=snake_case__ , )

    @require_torch_multi_gpu
    @parameterized.expand(snake_case__ , name_func=snake_case__ )
    def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[int] ):
        """simple docstring"""
        self.run_and_check(
            stage=snake_case__ , model=snake_case__ , distributed=snake_case__ , fpaa=snake_case__ , )

    @parameterized.expand(snake_case__ , name_func=snake_case__ )
    def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ):
        """simple docstring"""
        self.run_and_check(
            stage=snake_case__ , model=snake_case__ , distributed=snake_case__ , fpaa=snake_case__ , )

    @require_torch_multi_gpu
    @parameterized.expand(snake_case__ , name_func=snake_case__ )
    def UpperCAmelCase__ ( self : Dict , snake_case__ : List[str] , snake_case__ : Dict ):
        """simple docstring"""
        self.run_and_check(
            stage=snake_case__ , model=snake_case__ , distributed=snake_case__ , fpaa=snake_case__ , )

    # do_checks: upstream validates the produced output dir; a no-op here.
    def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Any ):
        """simple docstring"""
        pass

    # run_and_check: launch one training run and validate its output.
    # NOTE(review): body reads `models[model]` / `output_dir`, but locals are
    # assigned to the throwaway `__lowerCAmelCase`, so names do not line up.
    def UpperCAmelCase__ ( self : Any , snake_case__ : str , snake_case__ : str , snake_case__ : int = 10 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : bool = True , ):
        """simple docstring"""
        __lowerCAmelCase = models[model]
        __lowerCAmelCase = self.run_trainer(
            stage=snake_case__ , model_name=snake_case__ , eval_steps=snake_case__ , num_train_epochs=1 , distributed=snake_case__ , fpaa=snake_case__ , )
        self.do_checks(snake_case__ )
        return output_dir

    # run_trainer: build the CLI arg list and launch run_asr.py under deepspeed.
    def UpperCAmelCase__ ( self : Dict , snake_case__ : str , snake_case__ : str , snake_case__ : int = 10 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , ):
        """simple docstring"""
        __lowerCAmelCase = self.get_auto_remove_tmp_dir("./xxx" , after=snake_case__ )
        __lowerCAmelCase = F"\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(snake_case__ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n ".split()
        if fpaa:
            args.extend(["--fp16"] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        __lowerCAmelCase = F"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        __lowerCAmelCase = [F"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        __lowerCAmelCase = self.get_launcher(snake_case__ )
        __lowerCAmelCase = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(snake_case__ , env=self.get_env() )
        return output_dir

    # get_launcher: deepspeed launcher prefix; caps at 2 GPUs when distributed.
    # NOTE(review): `distributed` / `num_gpus` are read but the parameter and
    # assignment target were mangled away.
    def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Optional[Any]=False ):
        """simple docstring"""
        __lowerCAmelCase = min(2 , get_gpu_count() ) if distributed else 1
        return F"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 376 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class a ( __UpperCAmelCase ):
    """BigBird QA module with an extra 5-way classification head on the pooled
    output (natural-questions answer-category head).

    NOTE(review): mangled — the base class `__UpperCAmelCase` is undefined
    (presumably `FlaxBigBirdForQuestionAnsweringModule`), `jnp.floataa` looks
    like a mangled `jnp.float32`, and the assignments to the throwaway
    `__lowerCAmelCase` should create `self.cls` / `outputs` / `cls_out`.
    """

    lowercase_ : BigBirdConfig
    lowercase_ : jnp.dtype = jnp.floataa
    lowercase_ : bool = True

    def UpperCAmelCase__ ( self : Optional[int] ):
        """Add the 5-way category head on top of the parent module's setup."""
        super().setup()
        __lowerCAmelCase = nn.Dense(5 , dtype=self.dtype )

    def __call__( self : Optional[Any] , *snake_case__ : List[str] , **snake_case__ : str ):
        """Return (start_logits, end_logits, category_logits)."""
        __lowerCAmelCase = super().__call__(*snake_case__ , **snake_case__ )
        # outputs[2] is the pooled output; run it through the category head
        __lowerCAmelCase = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)
class a ( __UpperCAmelCase ):
    """Model wrapper binding the NQ module above as `module_class`.

    NOTE(review): mangled — the base was presumably
    `FlaxBigBirdForQuestionAnswering`, and
    `FlaxBigBirdForNaturalQuestionsModule` is undefined here because the
    module class above was renamed to `a`.
    """
    lowercase_ : List[str] = FlaxBigBirdForNaturalQuestionsModule
def _UpperCAmelCase(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    """Mean cross-entropy loss averaged over the start, end, and pooled heads.

    Fixes: the original signature repeated one mangled parameter name six
    times (a SyntaxError) and every local assignment target was collapsed;
    names are restored so the three `cross_entropy` calls receive the matching
    (logits, labels) pairs.

    Each `*_logits` is a float array whose last axis is the class dimension;
    each `*_labels` is an integer class-index array of the matching leading
    shape. Returns a scalar jnp array.
    """

    def cross_entropy(logits, labels, reduction=None):
        # One-hot encode labels against the class axis, then take the
        # negative log-likelihood; `reduction` (e.g. jnp.mean) collapses it.
        num_classes = logits.shape[-1]
        onehot = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        log_probs = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(onehot * log_probs, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class a :
    """Hyper-parameter bundle for the BigBird natural-questions training run.

    NOTE(review): mangled — every field shares the name `lowercase_`, so at
    runtime only the last annotation/value survives; the originals were
    distinct names (model_id, batch sizes, lr, warmup, paths, ...). The method
    below was presumably `__post_init__` (it is never called under its mangled
    name) and it reads `self.base_dir` / `self.save_dir` /
    `self.batch_size_per_device`, none of which exist after the mangling.
    """

    lowercase_ : str = "google/bigbird-roberta-base"
    lowercase_ : int = 3_000
    lowercase_ : int = 10_500
    lowercase_ : int = 128
    lowercase_ : int = 3
    lowercase_ : int = 1
    lowercase_ : int = 5
    # tx_args
    lowercase_ : float = 3e-5
    lowercase_ : float = 0.0
    lowercase_ : int = 20_000
    lowercase_ : float = 0.0095
    lowercase_ : str = "bigbird-roberta-natural-questions"
    lowercase_ : str = "training-expt"
    lowercase_ : str = "data/nq-training.jsonl"
    lowercase_ : str = "data/nq-validation.jsonl"

    def UpperCAmelCase__ ( self : Optional[int] ):
        """Create the experiment directory and derive the global batch size."""
        os.makedirs(self.base_dir , exist_ok=snake_case__ )
        __lowerCAmelCase = os.path.join(self.base_dir , self.save_dir )
        __lowerCAmelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class a :
    """Collator: pads tokenized NQ features to `max_length` and shards the
    resulting jnp batch across devices.

    NOTE(review): mangled — the two fields share the name `lowercase_`
    (presumably pad_id and max_length), `self.collate_fn` / `self.fetch_inputs`
    / `self._fetch_inputs` are called under names that no longer exist, and
    `jnp.intaa` looks like a mangled `jnp.int32`. Local assignment targets were
    collapsed to `__lowerCAmelCase` while later lines read the intended names.
    """

    lowercase_ : int
    lowercase_ : int = 4_096  # no dynamic padding on TPUs

    def __call__( self : List[Any] , snake_case__ : Union[str, Any] ):
        """Collate one raw batch, then shard it across local devices."""
        __lowerCAmelCase = self.collate_fn(snake_case__ )
        __lowerCAmelCase = jax.tree_util.tree_map(snake_case__ , snake_case__ )
        return batch

    def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Dict ):
        """Build the model input dict (ids, mask, start/end/pooled labels)."""
        __lowerCAmelCase , __lowerCAmelCase = self.fetch_inputs(features["input_ids"] )
        __lowerCAmelCase = {
            "input_ids": jnp.array(snake_case__ , dtype=jnp.intaa ),
            "attention_mask": jnp.array(snake_case__ , dtype=jnp.intaa ),
            "start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ),
            "end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ),
            "pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ),
        }
        return batch

    def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : list ):
        """Pad every sequence in the batch; returns (input_ids, attention_mask)."""
        __lowerCAmelCase = [self._fetch_inputs(snake_case__ ) for ids in input_ids]
        return zip(*snake_case__ )

    def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : list ):
        """Pad one sequence to max_length with pad_id; mask is 1 for real tokens."""
        __lowerCAmelCase = [1 for _ in range(len(snake_case__ ) )]
        while len(snake_case__ ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def _UpperCAmelCase(dataset, batch_size, seed=None):
    """Yield successive dict batches of `batch_size` rows from `dataset`.

    Fixes: the original signature repeated one mangled parameter name (a
    SyntaxError) while the body read `seed` / `dataset` / `batch_size`.

    When `seed` is given the dataset is shuffled first (requires a
    `.shuffle(seed=...)` method, e.g. a HF `Dataset`). A trailing partial
    batch is dropped. Slicing `dataset[i:j]` must yield something `dict()`
    accepts (a mapping, or an iterable of key/value pairs).
    """
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def _UpperCAmelCase(state, drp_rng, **model_inputs):
    """pmapped training step: grads + AdamW update, losses pmean'd over devices.

    Fixes: the original signature reused one mangled name for both positional
    parameters and the **kwargs (a SyntaxError); the body already read
    `state` / `drp_rng` / `model_inputs`, so those names are restored.

    Returns (new_state, {"loss": mean_loss}, new_dropout_rng).
    """

    def loss_fn(params):
        # Labels are popped so only genuine model kwargs are forwarded.
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        # train=True enables dropout; presumably the mangled kwarg was True here.
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    # Split the dropout rng so the next step gets fresh randomness.
    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def _UpperCAmelCase(state, **model_inputs):
    """pmapped eval step: forward pass only, loss pmean'd over devices.

    Fixes: the original signature reused one mangled name for the positional
    parameter and the **kwargs (a SyntaxError); the body already read
    `state` / `model_inputs`.
    """
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    # train=False disables dropout for evaluation; presumably the mangled
    # kwarg was False here.
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class a ( train_state.TrainState ):
    """TrainState extended with a static (non-pytree) loss function field.

    NOTE(review): `pytree_node=__UpperCAmelCase` references an undefined name;
    presumably it was `pytree_node=False` so the callable is not traced as a
    parameter — confirm upstream.
    """
    lowercase_ : Callable = struct.field(pytree_node=__UpperCAmelCase )
@dataclass
class a :
    """Training-loop driver for the BigBird natural-questions run.

    NOTE(review): mangled — every field shares the name `lowercase_` (only the
    last survives at runtime; the originals were presumably args,
    data_collator, train_step_fn, val_step_fn, model_save_fn, logger,
    scheduler_fn), every method shares one name so later defs shadow earlier
    ones, and bodies assign to the throwaway `__lowerCAmelCase` while reading
    the intended variable names — so nothing here runs as written.
    """

    lowercase_ : Args
    lowercase_ : Callable
    lowercase_ : Callable
    lowercase_ : Callable
    lowercase_ : Callable
    lowercase_ : wandb
    lowercase_ : Callable = None

    # create_state: build a TrainState for `model` (optionally restored from
    # ckpt_dir) and replicate it across devices.
    def UpperCAmelCase__ ( self : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : str=None ):
        """simple docstring"""
        __lowerCAmelCase = model.params
        __lowerCAmelCase = TrainState.create(
            apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , loss_fn=snake_case__ , )
        if ckpt_dir is not None:
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = restore_checkpoint(snake_case__ , snake_case__ )
            __lowerCAmelCase = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            # Rebuild the optimizer/schedule so opt_state matches the restored step.
            __lowerCAmelCase , __lowerCAmelCase = build_tx(**snake_case__ )
            __lowerCAmelCase = train_state.TrainState(
                step=snake_case__ , apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , opt_state=snake_case__ , )
            __lowerCAmelCase = args
            __lowerCAmelCase = data_collator
            __lowerCAmelCase = lr
            __lowerCAmelCase = params
        __lowerCAmelCase = jax_utils.replicate(snake_case__ )
        return state

    # train: epoch loop with periodic logging, evaluation, and checkpointing.
    def UpperCAmelCase__ ( self : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Any ):
        """simple docstring"""
        __lowerCAmelCase = self.args
        __lowerCAmelCase = len(snake_case__ ) // args.batch_size
        __lowerCAmelCase = jax.random.PRNGKey(0 )
        __lowerCAmelCase = jax.random.split(snake_case__ , jax.device_count() )
        for epoch in range(args.max_epochs ):
            __lowerCAmelCase = jnp.array(0 , dtype=jnp.floataa )
            __lowerCAmelCase = get_batched_dataset(snake_case__ , args.batch_size , seed=snake_case__ )
            __lowerCAmelCase = 0
            for batch in tqdm(snake_case__ , total=snake_case__ , desc=F"Running EPOCH-{epoch}" ):
                __lowerCAmelCase = self.data_collator(snake_case__ )
                __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.train_step_fn(snake_case__ , snake_case__ , **snake_case__ )
                running_loss += jax_utils.unreplicate(metrics["loss"] )
                i += 1
                if i % args.logging_steps == 0:
                    __lowerCAmelCase = jax_utils.unreplicate(state.step )
                    __lowerCAmelCase = running_loss.item() / i
                    __lowerCAmelCase = self.scheduler_fn(state_step - 1 )
                    __lowerCAmelCase = self.evaluate(snake_case__ , snake_case__ )
                    __lowerCAmelCase = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(snake_case__ ) )
                    self.logger.log(snake_case__ , commit=snake_case__ )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + F"-e{epoch}-s{i}" , state=snake_case__ )

    # evaluate: average validation loss over the whole validation split.
    def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Any , snake_case__ : Optional[Any] ):
        """simple docstring"""
        __lowerCAmelCase = get_batched_dataset(snake_case__ , self.args.batch_size )
        __lowerCAmelCase = len(snake_case__ ) // self.args.batch_size
        __lowerCAmelCase = jnp.array(0 , dtype=jnp.floataa )
        __lowerCAmelCase = 0
        for batch in tqdm(snake_case__ , total=snake_case__ , desc="Evaluating ... " ):
            __lowerCAmelCase = self.data_collator(snake_case__ )
            __lowerCAmelCase = self.val_step_fn(snake_case__ , **snake_case__ )
            running_loss += jax_utils.unreplicate(metrics["loss"] )
            i += 1
        return running_loss / i

    # save_checkpoint: write model weights, optimizer state, args, collator,
    # and the current step under `save_dir`.
    def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Any ):
        """simple docstring"""
        __lowerCAmelCase = jax_utils.unreplicate(snake_case__ )
        print(F"SAVING CHECKPOINT IN {save_dir}" , end=" ... " )
        self.model_save_fn(snake_case__ , params=state.params )
        with open(os.path.join(snake_case__ , "opt_state.msgpack" ) , "wb" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(snake_case__ , "args.joblib" ) )
        joblib.dump(self.data_collator , os.path.join(snake_case__ , "data_collator.joblib" ) )
        with open(os.path.join(snake_case__ , "training_state.json" ) , "w" ) as f:
            json.dump({"step": state.step.item()} , snake_case__ )
        print("DONE" )
def _UpperCAmelCase(save_dir, state):
    """Load a checkpoint written by `save_checkpoint` from `save_dir`.

    Fixes: the original signature repeated one mangled parameter name (a
    SyntaxError) while the body read `save_dir` / `state`; local assignment
    targets are restored to the names the return statement uses.

    `state` supplies the pytree templates (`state.params`, `state.opt_state`)
    that flax's `from_bytes` deserializes into.

    Returns (params, opt_state, step, args, data_collator).
    """
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def _UpperCAmelCase(lr, init_lr, warmup_steps, num_train_steps):
    """Linear warmup from `init_lr` to `lr`, then linear decay to ~0.

    Fixes: the original signature repeated one mangled parameter name four
    times (a SyntaxError) while the body read `num_train_steps` /
    `warmup_steps`; locals are restored to the names `join_schedules` consumes.

    Returns an optax schedule (callable: step -> learning rate).
    """
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def _UpperCAmelCase(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimizer (with a weight-decay mask) and its LR schedule.

    Fixes: the original signature repeated one mangled parameter name five
    times (a SyntaxError); parameter/local names are restored from the body's
    reads. Returns (tx, lr_schedule).
    """

    def weight_decay_mask(params):
        # Exclude biases and LayerNorm scales from weight decay.
        # NOTE(review): the comprehension tests `v` (the parameter arrays)
        # rather than the flattened path tuples `k`; upstream intent was
        # almost certainly the key path — confirm before relying on the mask.
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    # NOTE(review): `scheduler_fn` is the warmup/decay schedule builder defined
    # just above (its name was also mangled to `_UpperCAmelCase`) — confirm the
    # intended target before running.
    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 376 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCamelCase (TensorFormatter[Mapping, 'torch.Tensor', Mapping] ):
    """Formats Arrow rows/columns/batches as (possibly nested) torch tensors.

    NOTE(review): mangled — `__init__` and `_consolidate` repeat the parameter
    name `__UpperCAmelCase` (a SyntaxError with `**kwargs`), local/attribute
    assignment targets were collapsed to `SCREAMING_SNAKE_CASE__` (e.g.
    `self.torch_tensor_kwargs` is never actually set), and `torch.intaa` /
    `torch.floataa` look like mangled `torch.int64` / `torch.float32`.
    """

    def __init__( self : Optional[int] , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : int ) -> Dict:
        super().__init__(features=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    # _consolidate: stack a homogeneous list of tensors into one tensor,
    # otherwise return the list unchanged.
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Dict ) -> Union[str, Any]:
        import torch

        if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and column:
            if all(
                isinstance(__UpperCAmelCase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(__UpperCAmelCase )
        return column

    # _tensorize: convert one scalar/ndarray/PIL image to a torch tensor,
    # picking default dtypes for integer and floating inputs.
    def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : List[Any] ) -> str:
        import torch

        # Strings and bytes are passed through untouched.
        if isinstance(__UpperCAmelCase , (str, bytes, type(__UpperCAmelCase )) ):
            return value
        elif isinstance(__UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()

        SCREAMING_SNAKE_CASE__ = {}
        if isinstance(__UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            SCREAMING_SNAKE_CASE__ = {"""dtype""": torch.intaa}
        elif isinstance(__UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            SCREAMING_SNAKE_CASE__ = {"""dtype""": torch.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(__UpperCAmelCase , PIL.Image.Image ):
                SCREAMING_SNAKE_CASE__ = np.asarray(__UpperCAmelCase )

        # Explicit per-call kwargs override the inferred default dtype.
        return torch.tensor(__UpperCAmelCase , **{**default_dtype, **self.torch_tensor_kwargs} )

    # _recursive_tensorize: walk nested lists/tuples/object arrays, tensorizing
    # the leaves and consolidating homogeneous levels.
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
        import torch

        # support for torch, tf, jax etc.
        if hasattr(__UpperCAmelCase , """__array__""" ) and not isinstance(__UpperCAmelCase , torch.Tensor ):
            SCREAMING_SNAKE_CASE__ = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(__UpperCAmelCase , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(__UpperCAmelCase ) for substruct in data_struct] )
        elif isinstance(__UpperCAmelCase , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(__UpperCAmelCase ) for substruct in data_struct] )
        return self._tensorize(__UpperCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : dict ) -> Optional[Any]:
        return map_nested(self._recursive_tensorize , __UpperCAmelCase , map_list=__UpperCAmelCase )

    # format_row: extract one Arrow row, decode features, tensorize.
    def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : pa.Table ) -> Mapping:
        SCREAMING_SNAKE_CASE__ = self.numpy_arrow_extractor().extract_row(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ = self.python_features_decoder.decode_row(__UpperCAmelCase )
        return self.recursive_tensorize(__UpperCAmelCase )

    # format_column: extract/decode/tensorize one column and consolidate it.
    def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : pa.Table ) -> "torch.Tensor":
        SCREAMING_SNAKE_CASE__ = self.numpy_arrow_extractor().extract_column(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ = self.python_features_decoder.decode_column(__UpperCAmelCase , pa_table.column_names[0] )
        SCREAMING_SNAKE_CASE__ = self.recursive_tensorize(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ = self._consolidate(__UpperCAmelCase )
        return column

    # format_batch: extract/decode/tensorize a whole batch, consolidating each
    # column independently.
    def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : pa.Table ) -> Mapping:
        SCREAMING_SNAKE_CASE__ = self.numpy_arrow_extractor().extract_batch(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ = self.python_features_decoder.decode_batch(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ = self.recursive_tensorize(__UpperCAmelCase )
        for column_name in batch:
            SCREAMING_SNAKE_CASE__ = self._consolidate(batch[column_name] )
        return batch
| 196 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    """Builds tiny ViTMAE configs and dummy inputs for the model tests below.

    Fixes: the original class was unreachable under its mangled name while the
    test class instantiates ``ViTMAEModelTester(self)``; ``__init__`` repeated
    one mangled parameter name (a SyntaxError) and assigned locals instead of
    attributes; method names and locals are restored from the call sites in
    the sibling ``ViTMAEModelTest`` class.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one dummy batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        # NOTE(review): the mangled source had `is_decoder=<undefined>`;
        # False restores the conventional encoder-only setting — confirm upstream.
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward pass through the bare model; checks the hidden-state shape."""
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        """Forward pass through the pretraining head; checks the logits shape,
        including the single-channel (greyscale) configuration."""
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase (A__ ,A__ ,unittest.TestCase ):
lowerCamelCase__ : Optional[int] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCamelCase__ : str = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : List[str] = False
lowerCamelCase__ : str = False
lowerCamelCase__ : List[Any] = False
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = ViTMAEModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] ) -> List[Any]:
# make masks reproducible
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
SCREAMING_SNAKE_CASE__ = torch.from_numpy(__UpperCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
SCREAMING_SNAKE_CASE__ = pt_noise
super().check_pt_tf_models(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = outputs[0].cpu().numpy()
SCREAMING_SNAKE_CASE__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(__UpperCAmelCase )
model.to(__UpperCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
# Make sure we don't have nans
SCREAMING_SNAKE_CASE__ = after_outputs[0].cpu().numpy()
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCAmelCase , 1e-5 )
    # The following overrides disable common tests that are nondeterministic for
    # ViTMAE (a fresh random mask + ids_restore is sampled on every forward pass).
    # NOTE(review): all five method names below are machine-mangled to the same
    # identifier, so only the last definition survives at class-creation time —
    # the real (distinct) test names need to be recovered from the upstream file.
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
        pass
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
        pass
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
        pass
    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
        pass
    @slow
    def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
        # Smoke test: the first pretrained ViT checkpoint can be downloaded and
        # instantiated as a ViTMAEModel (requires network access, hence @slow).
        # NOTE(review): the two names below are machine-mangled — presumably the
        # loaded model is bound to a local and that local is asserted non-None;
        # as written the code raises NameError. Confirm against upstream.
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE__ = ViTMAEModel.from_pretrained(__UpperCAmelCase )
            self.assertIsNotNone(__UpperCAmelCase )
def prepare_img():
    """Load the standard COCO fixture image used by the integration tests.

    Renamed from the mangled identifier ``A``: the integration test below calls
    ``prepare_img()``, which was otherwise undefined.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowerCamelCase(unittest.TestCase):
    """Integration test for the pretrained ``facebook/vit-mae-base`` checkpoint.

    Local/property names are restored from their use sites: the test body reads
    ``self.default_image_processor``, so the cached property must carry that name.
    """

    @cached_property
    def default_image_processor(self):
        # Vision backend may be absent in some CI environments.
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self) -> None:
        """Run a forward pass with a fixed noise mask and verify the logits."""
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        # NOTE(review): assumes `torch_device` is imported at module level — confirm.
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 196 | 1 |
"""simple docstring"""
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    """Given a point on the ellipse 4x^2 + y^2 = 100 and the gradient of an
    incoming beam, return (next_x, next_y, outgoing_gradient) after reflection.

    Renamed from a mangled identifier: ``solution`` below calls ``next_point``.
    """
    # gradient of the normal at (point_x, point_y); reflect the beam about it
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point; take the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Project Euler 144: count reflections of a laser inside the ellipse
    4x^2 + y^2 = 100 before it escapes through the gap -0.01 <= x <= 0.01, y > 0.

    The beam enters at (0.0, 10.1) and first strikes (1.4, -9.6).
    """
    num_reflections: int = 0
    point_x = first_x_coord
    point_y = first_y_coord
    # gradient of the incoming beam from the entry hole (0.0, 10.1)
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections


if __name__ == "__main__":
    print(F'{solution() = }')
| 707 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : List[Any] ) ->List[Any]:
A__ : Union[str, Any] = [1]
for i in range(2, UpperCAmelCase__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
A__ : Optional[int] = []
A__ : List[str] = list(range(UpperCAmelCase__ ) )
# Find permutation
while factorials:
A__ : Optional[int] = factorials.pop()
A__ , A__ : Optional[int] = divmod(UpperCAmelCase__, UpperCAmelCase__ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 498 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
_lowercase = logging.get_logger(__name__)
class _UpperCAmelCase(A__):
    """Feature extractor turning raw HTML into node strings plus their XPaths.

    NOTE(review): the class/base names are machine-mangled; this appears to be
    MarkupLM's feature extractor with base FeatureExtractionMixin — confirm.
    The module alias ``bsa`` used below also looks like a mangled ``bs4``.
    Method names are restored from their call sites inside ``__call__``.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Return (tags, subscripts) describing the XPath of a bs4 element.

        A subscript of 0 means the tag is its parent's only child of that name;
        otherwise it is the 1-based position among same-named siblings.
        """
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            # bug fix: `recursive` was passed a truthy element (mangled) which
            # made find_all search the whole subtree; only direct children count
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML string into (doc strings, per-string tag lists, per-string subscript lists)."""
        html_code = BeautifulSoup(html_string, '''html.parser''')
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError('''Number of doc strings and xtags does not correspond''')
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError('''Number of doc strings and xsubs does not correspond''')
        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Join parallel tag/subscript lists into an XPath string like /html/body/div[2]."""
        xpath = ''''''
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += F"/{tagname}"
            if subs != 0:
                xpath += F"[{subs}]"
        return xpath

    def __call__(self, html_strings):
        """Extract nodes and xpaths from one HTML string or a batch of them."""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                '''HTML strings must of type `str`, `List[str]` (batch of examples), '''
                F"but is of type {type(html_strings)}.")
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {'''nodes''': nodes, '''xpaths''': xpaths}
        # NOTE(review): tensor_type=None matches the upstream extractor — confirm
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
| 632 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module scaffolding for the UperNet model package. The mangled original
# assigned two different objects to one name and then referenced the undefined
# `_import_structure`; restored to the standard transformers lazy-import pattern.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling code is only importable when torch is installed
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # replace this module with a lazy proxy so submodules import on first use
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 632 | 1 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework for `return_tensors=` in the tests below; the test
# body reads the global `FRAMEWORK`, which the mangled original never defined.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = PerceiverTokenizer
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().setUp()
UpperCamelCase__ :Any = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=20 , UpperCamelCase_=5 ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = []
for i in range(len(UpperCamelCase_ ) ):
try:
UpperCamelCase__ :Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
UpperCamelCase__ :Optional[int] = list(filter(lambda UpperCamelCase_ : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , UpperCamelCase_ ) )
UpperCamelCase__ :Optional[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
UpperCamelCase__ :int = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
UpperCamelCase__ :Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
UpperCamelCase__ :List[str] = [t[0] for t in toks]
# Ensure consistency
UpperCamelCase__ :List[str] = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
UpperCamelCase__ :int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
UpperCamelCase__ :List[Any] = ''' ''' + output_txt
UpperCamelCase__ :Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.perceiver_tokenizer
UpperCamelCase__ :int = '''Unicode €.'''
UpperCamelCase__ :Union[str, Any] = tokenizer(UpperCamelCase_ )
UpperCamelCase__ :List[str] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['''input_ids'''] , UpperCamelCase_ )
# decoding
UpperCamelCase__ :Optional[Any] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , '''[CLS]Unicode €.[SEP]''' )
UpperCamelCase__ :Optional[Any] = tokenizer('''e è é ê ë''' )
UpperCamelCase__ :Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['''input_ids'''] , UpperCamelCase_ )
# decoding
UpperCamelCase__ :Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.perceiver_tokenizer
UpperCamelCase__ :Union[str, Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
UpperCamelCase__ :Tuple = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
UpperCamelCase__ :Any = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
UpperCamelCase__ :Dict = list(batch.input_ids.numpy()[0] )
else:
UpperCamelCase__ :Optional[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.perceiver_tokenizer
UpperCamelCase__ :List[str] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCamelCase__ :int = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , UpperCamelCase_ )
self.assertIn('''attention_mask''' , UpperCamelCase_ )
self.assertNotIn('''decoder_input_ids''' , UpperCamelCase_ )
self.assertNotIn('''decoder_attention_mask''' , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.perceiver_tokenizer
UpperCamelCase__ :Tuple = [
'''Summary of the text.''',
'''Another summary.''',
]
UpperCamelCase__ :Union[str, Any] = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='''max_length''' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCamelCase__ :List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase__ :str = tempfile.mkdtemp()
UpperCamelCase__ :List[Any] = ''' He is very happy, UNwant\u00E9d,running'''
UpperCamelCase__ :Any = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
UpperCamelCase__ :List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :int = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase__ :Optional[int] = tempfile.mkdtemp()
UpperCamelCase__ :Union[str, Any] = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
UpperCamelCase__ :str = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
UpperCamelCase__ :Optional[int] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCamelCase__ :Union[str, Any] = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
UpperCamelCase__ :str = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
UpperCamelCase__ :List[Any] = json.load(UpperCamelCase_ )
UpperCamelCase__ :Dict = [F'''<extra_id_{i}>''' for i in range(125 )]
UpperCamelCase__ :int = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
UpperCamelCase__ :Union[str, Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(UpperCamelCase_ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCamelCase__ :Any = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCamelCase__ :str = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=UpperCamelCase_ )]
UpperCamelCase__ :Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '''�''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
UpperCamelCase__ :Dict = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
UpperCamelCase__ :Optional[int] = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) | 280 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-module scaffolding for the Reformer package. The mangled original bound
# every structure to one name, referenced the undefined `_import_structure`,
# and had non-Python residue (`| 280 | 1 |`) fused onto the last line; restored
# to the standard transformers lazy-import pattern.
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # slow tokenizer needs sentencepiece
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # fast tokenizer needs the tokenizers library
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling code is only importable when torch is installed
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys

    # replace this module with a lazy proxy so submodules import on first use
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import sys
__A = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = __A, span: int = 13) -> int:
    """Project Euler 8: largest product of `span` adjacent digits in n.

    Fixes the mangled default (it referenced an undefined name instead of the
    digit string above) and generalizes the hard-coded window of 13 into the
    backward-compatible keyword ``span``.

    :param n: string of decimal digits to scan
    :param span: window length (default 13, matching the original problem)
    :return: the maximum product over all windows; if len(n) < span, the
        sentinel ``-sys.maxsize - 1`` is returned (no window exists)
    """
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - span + 1):
        product = 1
        for j in range(span):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f'{solution() = }')
| 593 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

# Module constants for the HerBERT fast tokenizer. The mangled original bound
# all four structures to one name; the class below references the canonical
# names, which are restored here.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

# maximum input length (in tokens) per checkpoint
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}

PRETRAINED_INIT_CONFIGURATION = {}
class _SCREAMING_SNAKE_CASE(UpperCAmelCase):
    """Fast (Rust-backed) tokenizer for HerBERT.

    NOTE(review): the class/base names are machine-mangled; the base appears to
    be PreTrainedTokenizerFast — confirm. Method and parameter names are
    restored to the PreTrainedTokenizerFast contract; the mangled original
    declared duplicate parameter names (a SyntaxError) and gave all methods the
    same name, so only the last one survived.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add [CLS]/[SEP] special tokens around one or two sequences."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the backend tokenizer model files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 580 | 0 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
a__ = HfApi()
a__ = {}
# fmt: off
a__ = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
a__ = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
a__ = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
a__ = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
a__ = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
a__ = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
a__ = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
a__ = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
a__ = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
a__ = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
a__ = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
a__ = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
a__ = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
a__ = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
a__ = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
# Sanity-check each diffusers UNet checkpoint: a seeded forward pass must match
# the expected logits recorded above. Local names are restored from their use
# sites — the mangled original bound everything to one name and then read
# `models`, `local_checkpoint`, `model`, `noise`, `time_step`, `logits`
# (all NameErrors).
# NOTE(review): `api` and `results` are themselves assigned under mangled names
# above, and the keys of `results` could not be recovered from this view —
# confirm against the upstream script.
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(F"Started running {mod.modelId}!!!")
        if mod.modelId.startswith("CompVis"):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
        # fixed seeds so the forward pass is reproducible
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
        )
        print(F"{mod.modelId} has passed successfully!!!")
| 711 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
# NOTE(review): the mangled original assigned everything to ``a__`` while later
# code reads ``dataset``, ``X``, ``y``, ``poly_reg``, ``X_poly`` and ``pol_reg``
# — all NameErrors.  Names restored from those uses.
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values  # position level (kept 2-D for sklearn)
y = dataset.iloc[:, 2].values    # salary
# The split results are not used below (the model is fit on the full data);
# kept for parity with the original example.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def snake_case__ ( ) -> int:
    """Plot the observed salaries (red) against the degree-4 polynomial fit (blue).

    NOTE(review): the original body plotted an undefined name ``a``; the
    module-level ``X``/``y`` arrays are the presumable intent — confirm.
    The ``-> int`` annotation is inherited from the original; the function
    actually returns None.
    """
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
if __name__ == "__main__":
    # NOTE(review): the plotting helper above was mangled to ``snake_case__``;
    # ``viz_polymonial`` must be bound to it for this call to resolve.
    viz_polymonial()
    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
"""simple docstring"""
from __future__ import annotations
# Sieve of Eratosthenes up to 1,000,000.  seive[k] is True iff k has no prime
# factor <= sqrt(1_000_000); entries 0 and 1 are (deliberately) left True, as in
# the original, and are never queried by the search below.
# NOTE(review): the mangled original bound both the list and the counter to the
# same name ``_snake_case`` while the loop read ``i`` and ``seive`` — NameErrors.
seive = [True] * 1_0_0_0_0_0_1
i = 2
while i * i <= 1_0_0_0_0_0_0:
    if seive[i]:
        # mark every multiple of the prime i, starting at i*i
        for j in range(i * i, 1_0_0_0_0_0_1, i):
            seive[j] = False
    i += 1
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
    """Return the precomputed sieve's primality flag for the given number.

    Bug fix: the original returned ``seive[n]`` where ``n`` was undefined; the
    parameter is the intended index.  Relies on the module-level ``seive`` list.
    """
    return seive[SCREAMING_SNAKE_CASE]
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
    """Return True if the decimal representation contains at least one even digit."""
    for digit in str(SCREAMING_SNAKE_CASE):
        if digit in '02468':
            return True
    return False
def __snake_case ( SCREAMING_SNAKE_CASE: int = 100_0000 ):
    """Return all circular primes up to the given limit.

    A circular prime stays prime under every rotation of its digits.  Any odd
    prime containing an even digit is skipped up front, since some rotation
    would end in that digit and be divisible by 2.

    Bug fixes: the original iterated ``range(3, limit + 1, 2)`` with ``limit``
    undefined (the parameter is the limit), and tested
    ``all(is_prime(num) ...)`` — trivially true for a number already known to
    be prime — instead of testing each rotation.

    NOTE(review): ``is_prime`` / ``contains_an_even_digit`` are not defined
    under those names in this file (the helpers above are all mangled to
    ``__snake_case``) — restore their bindings before running.
    """
    result = [2]  # result already includes the number 2.
    for num in range(3, SCREAMING_SNAKE_CASE + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            # all digit rotations of num
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def __snake_case ( ):
    """Return how many circular primes exist below one million."""
    circular = find_circular_primes()
    return len(circular)
if __name__ == "__main__":
    # NOTE(review): ``find_circular_primes`` is not defined under that name in
    # this file (the helpers above were all mangled to ``__snake_case``) —
    # rebind before executing.
    print(f'{len(find_circular_primes()) = }')
| 580 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
    """Return True when the argument is a perfect square.

    Bug fix: the mangled original read two undefined names (``number`` and
    ``sq``); both must refer to the parameter / its truncated integer root.
    """
    sq = int(SCREAMING_SNAKE_CASE**0.5)
    return SCREAMING_SNAKE_CASE == sq * sq
def __snake_case ( x_num: int , x_den: int , y_num: int , y_den: int , z_num: int , z_den: int ):
    """Return the reduced (numerator, denominator) of x + y + z, each given as
    a numerator/denominator pair.

    Bug fix: the mangled original declared one parameter name six times — a
    SyntaxError in Python — and reduced by ``gcd`` of two undefined names.
    Parameter names are restored from the body's usage.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def __snake_case ( SCREAMING_SNAKE_CASE: int = 35 ):
    """For all proper fractions x, y with denominators up to ``SCREAMING_SNAKE_CASE``
    (the search order), find z with 1/x**n + 1/y**n = 1/z**n for n in
    {1, 2, -1, -2}, accumulate the distinct reduced sums x + y + z, and return
    numerator + denominator of their total.

    Bug fixes: the mangled original read unbound names throughout (``order``,
    ``z_num``, ``z_den``, ``hcf``, ``unique_s``, ``total``) and labelled the
    n=-2 case ``# n=2``.  Bindings restored from the use sites.

    NOTE(review): the helpers called below (``add_three``, ``is_sq``) exist in
    this file only under the mangled name ``__snake_case`` — restore their real
    names before running.
    """
    order = SCREAMING_SNAKE_CASE  # parameter name kept for interface compatibility
    unique_s: set = set()
    total = Fraction(0)
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined under that name here (the
    # function above is mangled to ``__snake_case``) — rebind before executing.
    print(f'{solution() = }')
| 580 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCAmelCase__ ( UpperCAmelCase ):
    """Build a FocalNetConfig for the given checkpoint name.

    Architecture hyper-parameters (depths, embed dim, focal levels/windows,
    conv embedding, post-layernorm, layerscale) are inferred from substrings of
    the model name; ImageNet label maps are fetched from the HF hub.

    Bug fixes: the mangled original never bound the locals it later read, cast
    the whole function argument instead of each dict key in the id2label
    comprehension, and passed mangled kwargs ``idalabel``/``labelaid`` where
    FocalNetConfig expects ``id2label``/``label2id``.
    """
    model_name = UpperCAmelCase  # parameter name kept for interface compatibility
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def lowerCAmelCase__ ( UpperCAmelCase ):
    """Map an original FocalNet state-dict key to its transformers-layout name.

    Bug fix: the mangled original read and replaced an undefined name ``name``
    without ever binding it to the parameter, and discarded every replacement
    result.  The rewrites below are applied sequentially, so order matters
    (e.g. the ``encoder.`` prefix must be added before ``encoder.layers`` is
    rewritten to ``encoder.stages``).
    """
    name = UpperCAmelCase
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def lowerCAmelCase__ ( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    """Download an original FocalNet classification checkpoint, convert its
    state dict to the transformers layout, verify the outputs on a test image,
    and optionally save and/or push the converted model.

    Bug fixes: the mangled original declared one parameter name three times (a
    SyntaxError) and never bound the locals it read; parameter names are
    restored from the argparse call site, locals from their uses.

    NOTE(review): ``rename_key`` and ``get_focalnet_config`` exist in this file
    only under the mangled name ``lowerCAmelCase__`` — restore their real names
    before running.
    """
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys to the transformers naming scheme
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion on the standard COCO cats test image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # the processor must reproduce the reference torchvision preprocessing
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    # NOTE(review): no reference slice exists for the large/xlarge variants, so
    # ``expected_slice`` is unbound for them — as in the original.
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    # NOTE(review): the mangled original assigned the parser and the parsed
    # args to ``lowerCAmelCase__`` while reading ``parser``/``args`` — both
    # NameErrors.  Names restored.  ``convert_focalnet_checkpoint`` must be
    # bound to the converter above (mangled to ``lowerCAmelCase__``).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='focalnet-tiny',
        type=str,
        help='Name of the FocalNet model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub.',
    )
    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 704 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Image helpers are only importable when the vision extras are installed; the
# stub class below stands in for them otherwise so module import still succeeds.
if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class _A :
        '''Fallback stand-in used when vision dependencies are unavailable.'''

        @staticmethod
        # NOTE(review): ``*args``/``**kwargs`` were mangled to a single shared
        # name here — a SyntaxError as written; restore distinct names.
        def __lowerCAmelCase ( *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Union[str, Any] )-> Dict:
            pass
def lowerCAmelCase__ ( UpperCAmelCase ):
    """Ignore the argument and always return None (no-op stand-in for tests)."""
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
# Bug fix: the mangled original bound this to ``lowerCAmelCase__`` while every
# test below reads ``INVOICE_URL``.
INVOICE_URL = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _A ( unittest.TestCase ):
    '''Pipeline tests for the ``document-question-answering`` task across
    LayoutLMv2, LayoutLM and Donut checkpoints, with pinned expected answers.

    NOTE(review): every method here was mangled to the same name
    ``__lowerCAmelCase`` (several with duplicate parameter names — a
    SyntaxError as written), so under unittest only the last definition would
    survive; restore the original method names before relying on these tests.
    '''

    # model mapping consumed by the shared pipeline-test mixin
    _lowercase = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    # Builds the pipeline plus three equivalent example payloads (URL image,
    # loaded image, and image + precomputed OCR word boxes).
    @require_pytesseract
    @require_vision
    def __lowerCAmelCase ( self : Optional[int] , lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any] )-> Optional[int]:
        snake_case__ : Union[str, Any] = pipeline(
            """document-question-answering""" , model=lowerCamelCase , tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
        snake_case__ : int = INVOICE_URL
        snake_case__ : List[Any] = list(zip(*apply_tesseract(load_image(lowerCamelCase ) , lowerCamelCase , """""" ) ) )
        snake_case__ : Dict = """What is the placebo?"""
        snake_case__ : int = [
            {
                """image""": load_image(lowerCamelCase ),
                """question""": question,
            },
            {
                """image""": image,
                """question""": question,
            },
            {
                """image""": image,
                """question""": question,
                """word_boxes""": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    # Runs the pipeline on the examples above and checks only the result shape
    # (two answers per example), not the answer contents.
    def __lowerCAmelCase ( self : int , lowerCamelCase : str , lowerCamelCase : List[str] )-> Union[str, Any]:
        snake_case__ : List[Any] = dqa_pipeline(lowerCamelCase , top_k=2 )
        self.assertEqual(
            lowerCamelCase , [
                [
                    {"""score""": ANY(lowerCamelCase ), """answer""": ANY(lowerCamelCase ), """start""": ANY(lowerCamelCase ), """end""": ANY(lowerCamelCase )},
                    {"""score""": ANY(lowerCamelCase ), """answer""": ANY(lowerCamelCase ), """start""": ANY(lowerCamelCase ), """end""": ANY(lowerCamelCase )},
                ]
            ]
            * 3 , )

    # Tiny random LayoutLMv2: pinned low-confidence answers, plus the empty-OCR
    # edge cases (an image with no text, and explicitly empty words/boxes).
    @require_torch
    @require_detectrona
    @require_pytesseract
    def __lowerCAmelCase ( self : int )-> List[Any]:
        snake_case__ : List[Any] = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
        snake_case__ : int = INVOICE_URL
        snake_case__ : List[Any] = """How many cats are there?"""
        snake_case__ : Optional[int] = [
            {"""score""": 0.0_001, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
            {"""score""": 0.0_001, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
        ]
        snake_case__ : Dict = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(nested_simplify(lowerCamelCase , decimals=4 ) , lowerCamelCase )
        snake_case__ : str = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(nested_simplify(lowerCamelCase , decimals=4 ) , lowerCamelCase )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        snake_case__ : Optional[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        snake_case__ : Union[str, Any] = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(lowerCamelCase , [] )
        # We can optionnally pass directly the words and bounding boxes
        snake_case__ : Tuple = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        snake_case__ : Optional[int] = []
        snake_case__ : List[Any] = []
        snake_case__ : Tuple = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , words=lowerCamelCase , boxes=lowerCamelCase , top_k=2 )
        self.assertEqual(lowerCamelCase , [] )

    # Full LayoutLMv2 docvqa checkpoint (pinned revision): single call, dict
    # call, and batched call must all produce the same pinned answers.
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def __lowerCAmelCase ( self : int )-> Any:
        snake_case__ : List[str] = pipeline(
            """document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
        snake_case__ : List[Any] = INVOICE_URL
        snake_case__ : Optional[Any] = """What is the invoice number?"""
        snake_case__ : Any = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
            ] , )
        snake_case__ : Union[str, Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
            ] , )
        snake_case__ : Dict = dqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                [
                    {"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
                    {"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
                ],
            ]
            * 2 , )

    # Same LayoutLMv2 checkpoint but with max_seq_len=50, which changes the
    # top-ranked answers.
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def __lowerCAmelCase ( self : List[Any] )-> Any:
        snake_case__ : Optional[Any] = pipeline(
            """document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
        snake_case__ : Dict = INVOICE_URL
        snake_case__ : Tuple = """What is the invoice number?"""
        snake_case__ : Optional[Any] = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
                {"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
            ] , )
        snake_case__ : int = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
                {"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
            ] , )
        snake_case__ : List[Any] = dqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                [
                    {"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
                    {"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
                ]
            ]
            * 2 , )

    # LayoutLM (impira) checkpoint: also checks that the pipeline accepts
    # ``image=None`` when precomputed OCR word boxes are supplied.
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def __lowerCAmelCase ( self : List[str] )-> Dict:
        snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained(
            """impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCamelCase )
        snake_case__ : Tuple = pipeline(
            """document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCamelCase , revision="""3dc6de3""" , )
        snake_case__ : Optional[int] = INVOICE_URL
        snake_case__ : Union[str, Any] = """What is the invoice number?"""
        snake_case__ : Dict = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
            ] , )
        snake_case__ : int = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
            ] , )
        snake_case__ : int = dqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                [
                    {"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
                    {"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
                ]
            ]
            * 2 , )
        snake_case__ : Tuple = list(zip(*apply_tesseract(load_image(lowerCamelCase ) , lowerCamelCase , """""" ) ) )
        # This model should also work if `image` is set to None
        snake_case__ : Dict = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
            ] , )

    # Same LayoutLM checkpoint with max_seq_len=50.
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def __lowerCAmelCase ( self : int )-> str:
        snake_case__ : Dict = AutoTokenizer.from_pretrained(
            """impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCamelCase )
        snake_case__ : List[Any] = pipeline(
            """document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCamelCase , revision="""3dc6de3""" , max_seq_len=50 , )
        snake_case__ : Any = INVOICE_URL
        snake_case__ : List[Any] = """What is the invoice number?"""
        snake_case__ : int = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
            ] , )
        snake_case__ : str = dqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                [
                    {"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
                    {"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
                ]
            ]
            * 2 , )
        snake_case__ : Dict = list(zip(*apply_tesseract(load_image(lowerCamelCase ) , lowerCamelCase , """""" ) ) )
        # This model should also work if `image` is set to None
        snake_case__ : Optional[Any] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
            ] , )

    # Donut is generative: only the answer text is checked, no score/span.
    @slow
    @require_torch
    def __lowerCAmelCase ( self : int )-> Tuple:
        snake_case__ : Optional[int] = pipeline(
            """document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
        snake_case__ : str = INVOICE_URL
        snake_case__ : Tuple = """What is the invoice number?"""
        snake_case__ : Tuple = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(nested_simplify(lowerCamelCase , decimals=4 ) , [{"""answer""": """us-001"""}] )

    @require_tf
    @unittest.skip("""Document question answering not implemented in TF""" )
    def __lowerCAmelCase ( self : int )-> List[Any]:
        pass
| 172 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__snake_case : Optional[int] =datasets.logging.get_logger(__name__)
__snake_case : List[Any] ='\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
__snake_case : Dict ='\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
__snake_case : Union[str, Any] ='\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """Parse key (gold) and system CoNLL lines into per-document coreference info.

    Returns a dict mapping ``doc`` to a tuple
    ``(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)``
    as consumed by the coval ``evaluator``.
    """
    # coval's reader works on {doc_name: lines} mappings; we score a single doc.
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        # BUG FIX: system parse trees were previously built from the *key* document
        # lines; they must come from the system document lines.
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, sys_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    # BUG FIX: the tuple was previously assigned to a throwaway local and never
    # stored, so the returned dict was always empty.
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Score system lines against key lines with every ``(name, metric)`` pair.

    Returns a dict of ``{name}/recall``, ``{name}/precision``, ``{name}/f1``
    entries, plus ``conll_score`` (average of MUC, B-cubed and CEAFe F1, *100)
    when all three of those metrics were requested.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    # The CoNLL score is only meaningful when exactly muc + bcub + ceafe were run.
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    """Return True if the key file carries gold parse annotation.

    A non-comment CoNLL line with more than 6 columns whose 6th column
    (index 5) is not ``-`` indicates gold parse information. Scanning stops
    at the first line that decides the answer; a short non-comment line ends
    the scan early (original behavior preserved).
    """
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCamelCase__(datasets.Metric):
    """CoVal coreference metric: mentions, MUC, B-cubed, CEAFe, LEA and the
    averaged CoNLL score, computed over CoNLL-formatted key/system lines."""

    def _info(self):
        # ``_info``/``_compute`` are the method names the ``datasets.Metric``
        # base class dispatches to; the previous obfuscated names broke that
        # contract (and ``_compute`` had duplicate parameter names).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        """Score ``predictions`` against ``references`` (both lists of CoNLL lines)."""
        scores = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            # Minimum-span scoring requires gold parse annotation in the key file.
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=scores,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    """Holds the reference configuration and expected input shapes used by the
    ``VivitImageProcessor`` tests below.

    NOTE: renamed from the obfuscated ``lowerCamelCase__`` — the test suite
    already instantiates ``VivitImageProcessingTester(self)``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        crop_size=None,
    ):
        # The original signature reused one parameter name thirteen times (a
        # SyntaxError) and used mutable list defaults; both are fixed here.
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a ``VivitImageProcessor``."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``VivitImageProcessor``: attribute presence, config round-trips,
    and batched/unbatched PIL, numpy and torch video inputs.

    NOTE: every method previously shared one obfuscated name, so only the last
    definition survived; restored the conventional unittest method names.
    """

    # Processor under test; ``None`` skips instantiation when vision deps are missing.
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        # Shared helper holding the reference configuration and input shapes.
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        # Overrides passed as kwargs must win over the serialized dict.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from scipy.stats import spearmanr
import datasets
lowercase__ :Tuple = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase__ :List[str] = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase__ :int = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase(datasets.Metric):
    """Spearman rank-order correlation metric (wraps ``scipy.stats.spearmanr``)."""

    def _info(self):
        # ``_info``/``_compute`` are the names the ``datasets.Metric`` base class
        # dispatches to; the previous obfuscated names (with duplicate parameter
        # names in ``_compute``) broke that contract.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return {'spearmanr': rho} plus 'spearmanr_pvalue' when requested."""
        results = spearmanr(predictions, references)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        return {"spearmanr": results[0]}
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Splits source code into identifier-like tokens (anything that is not a
# letter, digit or underscore is a separator). Renamed from the obfuscated
# ``lowercase__`` names: ``NON_ALPHA``, ``MIN_NUM_TOKENS`` and ``NUM_PERM``
# are all read by the functions below.
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10  # documents with fewer tokens get no MinHash
NUM_PERM = 256  # number of MinHash permutations
def get_min_hash(tokens):
    """Compute a MinHash sketch of a token list.

    Returns ``None`` when there are fewer than ``MIN_NUM_TOKENS`` tokens
    (too little signal for near-duplicate detection). Duplicate tokens are
    collapsed with ``set`` before hashing.
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    # BUG FIX: ``num_perm`` was previously passed the token list itself instead
    # of the module-level NUM_PERM constant.
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """Split *code* on non-alphanumeric characters and return the set of
    non-empty tokens. Renamed from the obfuscated name: ``jaccard_similarity``
    below calls ``get_tokens``."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """Incrementally clusters near-duplicate documents via MinHash LSH.

    Renamed from the obfuscated ``lowercase``: ``make_duplicate_clusters``
    instantiates ``DuplicationIndex(duplication_jaccard_threshold=...)``.
    """

    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # base element -> set of near-duplicate elements (``.add`` is used below,
        # so the default factory must be ``set``).
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash):
        """Insert ``code_key`` and attach it to the cluster of its first
        already-indexed near-duplicate (or start a new cluster)."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # No existing cluster among the close duplicates: open one.
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self):
        """Return clusters as lists of ``{base_index, repo_name, path}`` dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath):
        """Dump the duplicate clusters to ``filepath`` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Worker: hash one ``(index, row)`` pair.

    Returns ``((index, repo_name, path), min_hash)`` or ``None`` (implicitly)
    when the document is too short to hash.
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """Yield ``(filename_tuple, min_hash)`` pairs for every hashable document,
    computing the hashes in a multiprocessing pool fed by a threaded prefetcher."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Build near-duplicate clusters for every document in the dataset.

    Renamed from the obfuscated name: ``deduplicate_dataset`` calls
    ``make_duplicate_clusters``.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1, code2):
    """Exact Jaccard similarity between the token sets of two code strings.

    BUG FIX: the obfuscated version bound both token sets to the same local and
    then read an undefined name; both operands are now kept distinct.
    """
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
# Module-level handle set by ``find_extremes`` so that worker processes can
# read the dataset without it being pickled per task. Renamed from the
# obfuscated ``lowercase__``: ``_find_cluster_extremes_shared`` reads
# ``_shared_dataset``.
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one duplicate cluster to its "extremes".

    An element joins ``extremes`` only if it is not a near-duplicate (Jaccard
    >= threshold) of an element already kept; otherwise the matching extreme's
    ``copies`` counter is incremented. Reads the dataset through the module
    global ``_shared_dataset`` (set by ``find_extremes``).
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute the extremes of every cluster in parallel.

    Publishes ``dataset`` through the module global ``_shared_dataset`` so the
    forked pool workers can access it without per-task pickling.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    """Remove near-duplicate rows from ``dataset``, keeping one "extreme" per
    duplicate group.

    Returns ``(filtered_dataset, duplicate_clusters)``; each cluster element is
    annotated with ``is_extreme`` and, for extremes, its ``copies`` count.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    # Drop every duplicate that is not an extreme.
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
'''simple docstring'''
def circle_sort(collection: list) -> list:
    """Sort ``collection`` in place with the circle-sort algorithm and return it.

    Renamed from the obfuscated name: the ``__main__`` guard below calls
    ``circle_sort``. The recursive helper compares elements that mirror each
    other around the center and keeps passing until no swap happens.

    >>> circle_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> circle_sort([])
    []
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """One circle pass over ``collection[low:high + 1]``; True if it swapped."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                # BUG FIX: the obfuscated version assigned the swap tuple to a
                # single throwaway local instead of swapping the two slots.
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        # Odd-length segment: compare the middle element with its neighbor.
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    # BUG FIX: both assignments previously bound a throwaway name while the
    # following lines read ``user_input`` / ``unsorted`` (NameError).
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(circle_sort(unsorted))
from math import factorial, radians
def lowercase_(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin of an angle (given in degrees) via its Maclaurin series.

    The angle is first reduced modulo 360, converted to radians, then
    ``accuracy`` additional series terms are summed with alternating signs.
    The result is rounded to ``rounded_values_count`` decimal places.

    >>> lowercase_(0)
    0.0
    >>> lowercase_(90)
    1.0
    """
    # Reduce to [0, 360) so the series converges quickly.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians  # first series term: x
    a = 3  # exponent of the next term
    b = -1  # alternating sign of the next term
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    # (Stray non-Python tokens that followed this call have been removed.)
    __import__("doctest").testmod()
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
# Module-level logger for this PyTorch -> Flax conversion helper.
__magic_name__ = logging.get_logger(__name__)
def rename_key(key):
    """Replace '.' before a numeric segment with '_' in a parameter name.

    E.g. ``"layers.0.weight" -> "layers_0.weight"``. Renamed from the
    obfuscated name: ``convert_pytorch_state_dict_to_flax`` calls ``rename_key``.
    """
    regex = r'\w+[.]\d+'
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, '_'.join(pat.split('.')))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight key tuple to its Flax equivalent and reshape the
    tensor when the layouts differ (conv HWIO, linear transpose).

    Returns ``(flax_key_tuple, tensor)``. The previous signature reused one
    parameter name three times (a SyntaxError).
    """
    # conv norm or layer norm: a "bias" that Flax stores as "scale"
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if (
        any('norm' in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch OIHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: Flax kernels are transposed relative to PyTorch weights
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into an (unflattened) Flax params dict,
    renaming/reshaping each weight and validating shapes against randomly
    initialized Flax parameters. The previous signature reused one parameter
    name three times (a SyntaxError).
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split('.'))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit.

    Runs in O(popcount) iterations. Renamed from the obfuscated name: the
    benchmark below calls this function by its real name.

    Raises:
        ValueError: if ``number`` is negative.
    """
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        # number & (number - 1) drops the lowest set bit (Kernighan's trick).
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing each bit with ``% 2`` and shifting right.

    Runs in O(bit-length) iterations. Renamed from the obfuscated name: the
    benchmark below calls this function by its real name.

    Raises:
        ValueError: if ``number`` is negative.
    """
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    """Print popcount results and ``timeit`` timings for both implementations.

    Renamed from the obfuscated name: the ``__main__`` guard calls
    ``benchmark()``. The ``timeit`` snippets reach back into this module via
    ``import __main__ as z``, so this only works when run as a script.
    """

    def do_benchmark(number: int) -> None:
        setup = 'import __main__ as z'
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit('z.get_set_bits_count_using_modulo_operator(25)', setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            'z.get_set_bits_count_using_brian_kernighans_algorithm(25)',
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
    # Run embedded doctests first, then time both bit-count implementations.
    import doctest
    doctest.testmod()
    benchmark()
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the falling product ``u * (u - 1) * ... * (u - (p - 1))`` used by
    Newton's forward-difference interpolation formula.

    Renamed from the obfuscated name (with duplicate parameters, a
    SyntaxError): ``main`` below calls ``ucal``.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    """Interactive Newton forward interpolation.

    Reads sample points and a target value from stdin, builds the forward
    difference table and prints the interpolated value. Assignment targets
    were restored from the surrounding reads (the obfuscated version bound
    throwaway locals); mirrors the upstream reference implementation.
    """
    n = int(input('enter the numbers of values: '))
    y = []
    for _ in range(n):
        y.append([])
    # Pre-fill an n x n table of zeros; column 0 will hold the y samples.
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))

    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())

    value = int(input('enter the value to interpolate: '))
    # Normalized offset from the first sample (assumes equally spaced x).
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
    # Interactive entry point: reads sample data from stdin and interpolates.
    main()
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( ProcessorMixin ):
    """Processor bundling a BLIP image processor with a tokenizer.

    Images are routed to the image processor and text to the tokenizer; when
    both are supplied the text encoding is merged into the image encoding so a
    single :class:`BatchEncoding` is returned.

    Fixes: the obfuscated dump used an undefined base class (``__lowercase``),
    duplicate parameter names in ``__init__``/``__call__`` (a SyntaxError),
    collapsed the three ProcessorMixin class attributes onto one name, and
    left ``text_encoding``/``encoding_image_processor`` unbound.  Names are
    restored following the upstream BLIP-2 processor.
    """

    # ProcessorMixin wiring: the sub-processors this class owns and the
    # classes that back them.
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        # BLIP tokenizers do not use token_type_ids — presumably why the flag
        # is forced off before registering the components; TODO confirm
        # against the upstream processor.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        # Processor used by default; switched to the tokenizer in text-only mode.
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize `text` and/or preprocess `images`; raises ValueError when
        neither is given."""
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        # De-duplicated union of both sub-processors' input names, order kept.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 133 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
    """Holds the hyper-parameters used by the Donut image-processing tests and
    builds the matching image-processor kwargs dict.

    Fixes: the obfuscated dump gave every ``__init__`` parameter the same name
    (a SyntaxError) and collapsed every attribute assignment to
    ``__magic_name__``, so the dict builder read attributes that were never
    set.  Parameter names are restored from the assignment order.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # mutable defaults kept from the original; they are never mutated
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Non-square default mirrors the checkpoint's (height, width) config.
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def _lowercase(self):
        """Return the kwargs dict used to build a Donut image processor."""
        return {
            'do_resize': self.do_resize,
            'size': self.size,
            'do_thumbnail': self.do_thumbnail,
            'do_align_long_axis': self.do_align_axis,
            'do_pad': self.do_pad,
            'do_normalize': self.do_normalize,
            'image_mean': self.image_mean,
            'image_std': self.image_std,
        }
@require_torch
@require_vision
class UpperCAmelCase_ ( _A , unittest.TestCase ):
    """Test suite for the Donut image processor over PIL, numpy and torch
    inputs (property presence, size parsing, batched/unbatched shapes).

    NOTE(review): this class came out of an obfuscated dump in a broken state:
    (1) every method is named `_lowercase`, so earlier definitions (the
    setUp-style hook, the `image_processor_dict` property, the attribute
    checks) are silently shadowed by later ones at class-creation time;
    (2) locals were collapsed to `__magic_name__`, leaving later references
    (`image_processor`, `encoded_images`, `image_inputs`) unbound;
    (3) `DonutImageProcessingTester` is not defined anywhere in this file and
    `self.image_processor_tester` is never assigned.  The annotations below
    describe the *intended* behavior of each method; the code itself is
    preserved byte-for-byte pending a coordinated rename.
    """

    # Processor class under test; None when vision deps are unavailable
    # (the suite is then skipped by @require_vision).
    a__ = DonutImageProcessor if is_vision_available() else None

    def _lowercase ( self : Optional[Any] ) -> Optional[int]:
        """Intended as setUp: build the hyper-parameter tester helper."""
        # NOTE(review): name undefined here and result discarded — see class note.
        __magic_name__ = DonutImageProcessingTester(self )

    @property
    def _lowercase ( self : Dict ) -> List[str]:
        """Intended as `image_processor_dict`: kwargs for the processor."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _lowercase ( self : List[str] ) -> List[str]:
        """Intended check: the processor exposes all expected config attributes."""
        __magic_name__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , """do_thumbnail""" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , """do_align_long_axis""" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , """do_pad""" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , """do_normalize""" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , """image_mean""" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , """image_std""" ) )

    def _lowercase ( self : Optional[int] ) -> Any:
        """Intended check: `from_dict` parses int and (w, h)-tuple sizes."""
        __magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
        __magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
        # Previous config had dimensions in (width, height) order
        __magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
        self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )

    def _lowercase ( self : List[Any] ) -> Tuple:
        """Intentionally empty placeholder test."""
        pass

    @is_flaky()
    def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
        """Intended check: PIL inputs produce correctly shaped pixel_values."""
        __magic_name__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , Image.Image )
        # Test not batched input
        __magic_name__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        __magic_name__ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    @is_flaky()
    def _lowercase ( self : str ) -> List[Any]:
        """Intended check: numpy inputs produce correctly shaped pixel_values."""
        __magic_name__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , np.ndarray )
        # Test not batched input
        __magic_name__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        __magic_name__ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    @is_flaky()
    def _lowercase ( self : Optional[int] ) -> str:
        """Intended check: torch inputs produce correctly shaped pixel_values."""
        __magic_name__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
        # Test not batched input
        __magic_name__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        __magic_name__ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
| 714 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both module constants were bound to the same name, so the logger was
# immediately clobbered by the URL map.  The map keeps its original (last)
# binding; the logger gets its conventional name.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
__lowerCAmelCase : Tuple = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}


class UpperCAmelCase_ ( PretrainedConfig ):
    """Configuration class for LiLT (Language-independent Layout Transformer).

    Fixes: the obfuscated dump used an undefined base class (``_A``), gave
    every ``__init__`` parameter the same name (a SyntaxError) and collapsed
    all attribute assignments to ``__magic_name__``.  Parameter names are
    restored from the assignment order; ``max_ad_position_embeddings`` in the
    dump is the digit-mangled form of LiLT's ``max_2d_position_embeddings``.
    """

    model_type = 'lilt'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # Ratio by which the layout-channel hidden size shrinks vs. the text channel.
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 76 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: the logger and the URL map were both bound to `a`, clobbering the
# logger.  The map keeps the original final binding of `a`.
logger = logging.get_logger(__name__)

a = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class __a ( PretrainedConfig ):
    """Configuration class for WavLM models (feature extractor, transformer,
    SpecAugment, quantizer, CTC and X-vector heads).

    Fixes: the obfuscated dump used an undefined base class (``_snake_case``),
    gave every ``__init__`` parameter the same name (a SyntaxError) and
    collapsed all attribute assignments to ``__SCREAMING_SNAKE_CASE``.
    Parameter names are restored from the assignment order and the visible
    default-value sequence.
    """

    model_type = 'wavlm'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm='group',
        feat_extract_activation='gelu',
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction='mean',
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.'
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def UpperCAmelCase__ ( self ):
        """Total stride of the convolutional feature extractor (product of the
        per-layer strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 109 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Public structure of this subpackage, consumed lazily below.
a__ : Tuple = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}

if TYPE_CHECKING:
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys

    # Fix: the original passed an undefined name (`_import_structure`) to
    # _LazyModule (NameError on import) and discarded the proxy instead of
    # installing it; the standard pattern replaces this module in sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], a__, module_spec=__spec__)
| 51 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
# Module logger (name kept from the dump; the builder below references it).
lowercase__ = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for the Parquet loader.

    Fix: the dump collapsed all three field names onto one duplicate name and
    dropped the class name that the builder references (``ParquetConfig``).
    Field names are restored from the ``self.config.*`` reads in the builder.
    """

    # Rows per Arrow record batch yielded while reading.
    batch_size: int = 10000
    # Optional column projection applied when reading.
    columns: Optional[List[str]] = None
    # Optional explicit feature schema; inferred from the files when None.
    features: Optional[datasets.Features] = None


# Backward-compatible alias for the obfuscated class name.
a_ = ParquetConfig
class a_ ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that streams Parquet files into Arrow tables.

    Fixes: the dump gave all four builder hooks the same method name (only the
    last survived) and collapsed every local to ``a_``, leaving `data_files`,
    `files`, `splits`, `schema`, `parquet_file` and `pa_table` unbound.  Hook
    names are restored to the ArrowBasedBuilder override points.
    """

    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        """Dataset metadata; features come from the config when given."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Build one SplitGenerator per split; handles str/list/dict data_files."""
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, 'rb') as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, Arrow table) pairs, one per read record batch."""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\''
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, 'rb') as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'{file_idx}_{batch_idx}', self._cast_table(pa_table)
                except ValueError as e:
                    # `lowercase__` is the module logger from the dump.
                    lowercase__.error(f'Failed to read file \'{file}\' with error {type(e)}: {e}')
                    raise
| 704 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
# Silence transformers' info-level logging for this conversion script.
logging.set_verbosity_warning()

# Indentation level used when json-dumping the generated config files
# (originally named `json_indent` upstream — name mangled by the dump).
lowercase__ =2
class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq-style).

    Fixes: the dump gave all four keyword-only ``__init__`` parameters the
    same name (a SyntaxError), collapsed every attribute to ``a_`` and named
    all four methods identically (only the last survived).  Names are
    restored from the internal call sites (``self.add_symbol``,
    ``self.indices``, ``d.add_from_file``, ``self._load_meta``) and from the
    converter below, which calls ``Dictionary.load``.
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos='<s>',
        pad='<pad>',
        eos='</s>',
        unk='<unk>',
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []   # index -> symbol
        self.count = []     # index -> frequency
        self.indices = {}   # symbol -> index
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        # Number of reserved (special) entries at the front of the table.
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range indices map to the unknown token, fairseq-style.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Build a Dictionary from a `<symbol> <count>`-per-line text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add `word` with frequency `n`; return its index.

        Re-adding an existing word accumulates its count unless `overwrite`.
        """
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # The base format has no metadata header; entries start at line 0.
        return 0

    def add_from_file(self, f):
        """Load entries from a filename or an open file object."""
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(' ', 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(' ', 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


# Backward-compatible alias for the obfuscated class name.
a_ = Dictionary
def rewrite_dict_keys(d):
    """Strip BPE continuation markers and add word-end markers to vocab keys.

    (1) remove word breaking symbol, (2) add word ending symbol where the word
    is not broken up, e.g.:
    d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    The four special tokens (<s>, <pad>, </s>, <unk>) are kept untouched.

    Fix: the dump left this function's locals unbound (`d`, `da`, `keep_keys`
    were collapsed to `a_`) and named it identically to the converter below,
    which shadowed it; the converter calls it as `rewrite_dict_keys`.
    """
    d2 = dict(
        (re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v)
        for k, v in d.items()
    )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens (they received a spurious </w> above)
    for k in keep_keys:
        del d2[f'{k}</w>']
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT checkpoint into Hugging Face format.

    Reads `checkpoint.pt`, `dict.txt` and `bpecodes` from
    `biogpt_checkpoint_path` and writes the vocab, merges, model config,
    tokenizer config and converted weights into `pytorch_dump_folder_path`.

    NOTE(review): restored from an obfuscated dump in which every local was
    collapsed to a single name and the function name shadowed
    `rewrite_dict_keys`; names follow the upstream conversion script and the
    `__main__` guard below, which calls this exact name.
    """
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f'path {biogpt_checkpoint_path} does not exist!')
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'Writing results to {pytorch_dump_folder_path}')

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, 'checkpoint.pt')
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f'path to the file {checkpoint_file} does not exist!')
    chkpt = torch.load(checkpoint_file, map_location='cpu')
    args = chkpt['cfg']['model']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, 'dict.txt')
    if not os.path.isfile(dict_file):
        raise ValueError(f'path to the file {dict_file} does not exist!')
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['vocab_file'])
    print(f'Generating {src_vocab_file} of {src_vocab_size} records')
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        # `lowercase__` is the module-level json indent constant (== 2).
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=lowercase__))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, 'bpecodes')
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f'path to the file {bpecodes_file} does not exist!')
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')
    model_conf = {
        'activation_dropout': args['activation_dropout'],
        'architectures': ['BioGptForCausalLM'],
        'attention_probs_dropout_prob': args['attention_dropout'],
        'bos_token_id': 0,
        'eos_token_id': 2,
        'hidden_act': args['activation_fn'],
        'hidden_dropout_prob': args['dropout'],
        'hidden_size': args['decoder_embed_dim'],
        'initializer_range': 0.02,
        'intermediate_size': args['decoder_ffn_embed_dim'],
        'layer_norm_eps': 1e-12,
        'layerdrop': args['decoder_layerdrop'],
        'max_position_embeddings': args['max_target_positions'],
        'model_type': 'biogpt',
        'num_attention_heads': args['decoder_attention_heads'],
        'num_hidden_layers': args['decoder_layers'],
        'pad_token_id': 1,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_decoder_input_output_embed'],
        'vocab_size': src_vocab_size,
    }
    # good hparam defaults to start with
    print(f'Generating {biogpt_model_config_file}')
    with open(biogpt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=lowercase__))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        'bos_token': '<s>',
        'eos_token': '</s>',
        'model_max_length': 10_24,
        'pad_token': '<pad>',
        'special_tokens_map_file': None,
        'tokenizer_class': 'BioGptTokenizer',
        'unk_token': '<unk>',
    }
    print(f'Generating {biogpt_tokenizer_config_file}')
    with open(biogpt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=lowercase__))

    # model
    model_state_dict = chkpt['model']
    # remove unneeded keys
    ignore_keys = [
        'decoder.version',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    # Rename fairseq layers to their transformers counterparts; the output
    # projection keeps its bare name — TODO confirm against the upstream script.
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith('output_projection.weight'):
            model_state_dict['output_projection.weight'] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace('decoder', 'biogpt')] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print('Conversion is done!')
if __name__ == "__main__":
    # Fix: the dump bound the parser and the parsed namespace to `lowercase__`,
    # leaving `parser` and `args` unbound (NameError at startup).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--biogpt_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help=(
            'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
            ' bpecodes, etc.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 511 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Import PIL when the vision extras are installed; otherwise install a
# do-nothing stand-in so module-level references still resolve (the tests
# themselves are gated by @require_vision and will be skipped).
if is_vision_available():
    from PIL import Image
else:
    class snake_case :
        """No-op placeholder used when Pillow is not installed."""

        @staticmethod
        def a__ ( *_lowercase, **_lowercase ) -> Tuple:
            # Intentionally does nothing; only exists so attribute access on
            # the placeholder never fails at import time.
            pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
_a = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def a__ ( self, _lowercase, _lowercase, _lowercase ) -> Tuple:
SCREAMING_SNAKE_CASE_ = pipeline(
'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection' )
SCREAMING_SNAKE_CASE_ = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def a__ ( self, _lowercase, _lowercase ) -> str:
SCREAMING_SNAKE_CASE_ = object_detector(examples[0], threshold=0.0 )
SCREAMING_SNAKE_CASE_ = len(_lowercase )
self.assertGreater(_lowercase, 0 )
self.assertEqual(
_lowercase, [
{
'score': ANY(_lowercase ),
'label': ANY(_lowercase ),
'box': {'xmin': ANY(_lowercase ), 'ymin': ANY(_lowercase ), 'xmax': ANY(_lowercase ), 'ymax': ANY(_lowercase )},
}
for i in range(_lowercase )
], )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def a__ ( self ) -> Union[str, Any]:
pass
@require_torch
def a__ ( self ) -> str:
SCREAMING_SNAKE_CASE_ = pipeline(
'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection' )
SCREAMING_SNAKE_CASE_ = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png', candidate_labels=['cat', 'remote', 'couch'], threshold=0.64, )
self.assertEqual(
nested_simplify(_lowercase, decimals=4 ), [
{'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
], )
SCREAMING_SNAKE_CASE_ = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
], threshold=0.64, )
self.assertEqual(
nested_simplify(_lowercase, decimals=4 ), [
[
{'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
], )
@require_torch
@slow
def a__ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], )
self.assertEqual(
nested_simplify(_lowercase, decimals=4 ), [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
], )
SCREAMING_SNAKE_CASE_ = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
], )
self.assertEqual(
nested_simplify(_lowercase, decimals=4 ), [
[
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
], )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def a__ ( self ) -> Optional[Any]:
pass
@require_torch
@slow
def a__ ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = 0.2
SCREAMING_SNAKE_CASE_ = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], threshold=_lowercase, )
self.assertEqual(
nested_simplify(_lowercase, decimals=4 ), [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
], )
@require_torch
@slow
def a__ ( self ) -> int:
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], top_k=_lowercase, )
self.assertEqual(
nested_simplify(_lowercase, decimals=4 ), [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
], )
| 294 |
'''Root-mean-square (RMS) speed of gas molecules from kinetic theory: v_rms = sqrt(3RT/M).'''

# Molar gas constant R in J/(K*mol).
UNIVERSAL_GAS_CONSTANT = 8.3_1_4_4_5_9_8
# Backward-compatible alias for the previous (obfuscated) binding.
SCREAMING_SNAKE_CASE : int = UNIVERSAL_GAS_CONSTANT


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the RMS speed sqrt(3*R*T/M) of gas molecules.

    Args:
        temperature: absolute temperature in kelvin (must be >= 0).
        molar_mass: molar mass in kg/mol (must be > 0).

    Raises:
        ValueError: if temperature < 0 or molar_mass <= 0. (ValueError is a
            subclass of Exception, so callers catching the old generic
            Exception still work.)
    """
    if temperature < 0:
        raise ValueError('Temperature cannot be less than 0 K' )
    if molar_mass <= 0:
        raise ValueError('Molar mass cannot be less than or equal to 0 kg/mol' )
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


# Backward-compatible alias for the previous (obfuscated) function name.
_UpperCamelCase = rms_speed_of_molecule

if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example: N2 gas (molar mass given here as 28 — NOTE(review): formula expects
    # kg/mol; 28 g/mol would be 0.028 kg/mol. Preserved as in the original.)
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 294 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the (timm_key, hf_key) rename pairs for a DeiT checkpoint.

    Args:
        config: DeiTConfig-like object; only ``num_hidden_layers`` is read.
        base_model: if True, produce keys for the bare backbone (no "deit."
            prefix, layernorm+pooler head) instead of the classification model.

    Returns:
        List of (source_key, destination_key) tuples.

    NOTE(review): restored from obfuscation — the original had duplicate
    parameter names (a SyntaxError) while the body already used
    ``config``/``base_model``, and the call site uses ``create_rename_keys``.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight'''))
        rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias'''))
        rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight'''))
        rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias'''))
        rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight'''))
        rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias'''))
        rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight'''))
        rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias'''))
        rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight'''))
        rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias'''))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('cls_token', 'deit.embeddings.cls_token'),
            ('dist_token', 'deit.embeddings.distillation_token'),
            ('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
            ('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
            ('pos_embed', 'deit.embeddings.position_embeddings'),
        ])

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('norm.weight', 'layernorm.weight'),
                ('norm.bias', 'layernorm.bias'),
                ('pre_logits.fc.weight', 'pooler.dense.weight'),
                ('pre_logits.fc.bias', 'pooler.dense.bias'),
            ])
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit') else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ('norm.weight', 'deit.layernorm.weight'),
                ('norm.bias', 'deit.layernorm.bias'),
                ('head.weight', 'cls_classifier.weight'),
                ('head.bias', 'cls_classifier.bias'),
                ('head_dist.weight', 'distillation_classifier.weight'),
                ('head_dist.bias', 'distillation_classifier.bias'),
            ])

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate HF query/key/value entries, in place.

    Args:
        state_dict: mutable mapping of checkpoint tensors; fused
            ``blocks.{i}.attn.qkv.*`` entries are popped and replaced by
            per-head ``...attention.attention.{query,key,value}.*`` entries.
        config: DeiTConfig-like object; reads ``num_hidden_layers`` and
            ``hidden_size``.
        base_model: if True, omit the "deit." key prefix.

    NOTE(review): restored from obfuscation — the original clobbered every
    ``state_dict[...] = ...`` target and referenced the unbound locals
    ``in_proj_weight``/``in_proj_bias``; destination key names reconstructed
    from the HF DeiT layout — confirm against the released conversion script.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'deit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    NOTE(review): restored from obfuscation — the original had three identical
    parameter names (SyntaxError) and dropped the ``dct[new] = val`` store; the
    caller uses the name ``rename_key``.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats image used to sanity-check model conversions.

    Returns a PIL Image. Requires network access.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy a timm DeiT checkpoint into the HuggingFace format and save it.

    Args:
        deit_name: timm model name, e.g. ``vit_deit_base_distilled_patch16_224``;
            patch size and image size are parsed out of the name.
        pytorch_dump_folder_path: directory to save the converted model and
            image processor into.

    Downloads the ImageNet label file and the timm weights (network access
    required) and asserts the HF model reproduces the timm logits.

    NOTE(review): restored from obfuscation — every assignment target
    (``config.*``, ``state_dict``, ``model``, ...) had been clobbered to one
    recycled name; targets reconstructed from the surviving references.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset') , 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith('tiny'):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('small'):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('base'):
        pass  # base is the DeiTConfig default
    elif deit_name[4:].startswith('large'):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size)  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): restored from obfuscation — the parser and parsed args were
    # bound to `a_` while the code below referenced `parser`/`args` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--deit_name',
        default='vit_deit_base_distilled_patch16_224',
        type=str,
        help='Name of the DeiT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 444 |
import os
# Precomputes a list of the 100 first triangular numbers t_n = n*(n+1)/2.
# NOTE(review): the solver references TRIANGULAR_NUMBERS, so bind that name;
# `a_` is kept as a backward-compatible alias for the obfuscated binding
# (its old `str` annotation was wrong — this is a list of ints).
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
a_ = TRIANGULAR_NUMBERS
def solution(words_file_path=None):
    """Count the triangle words in a words file (Project Euler 42).

    A word's value is the sum of its letters' alphabetical positions
    (``ord(ch) - 64``, so A -> 1); the word is a triangle word when that value
    is a triangular number t_n = n*(n+1)/2.

    Args:
        words_file_path: optional path to a one-line file of comma-separated,
            double-quoted upper-case words; defaults to ``words.txt`` next to
            this script (backward compatible with the original behavior).

    Returns:
        The number of triangle words in the file.
    """
    if words_file_path is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        words_file_path = os.path.join(script_dir, 'words.txt')

    with open(words_file_path) as f:
        words = f.readline()

    # single line of comma-separated, double-quoted words
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    # self-contained triangular set so this function works even if the
    # module-level constant is missing (as in the obfuscated original)
    triangular_numbers = {n * (n + 1) // 2 for n in range(1, 101)}
    word_values = [sum(ord(ch) - 64 for ch in word) for word in words]
    return len([value for value in word_values if value in triangular_numbers])


if __name__ == "__main__":
    print(solution())
| 444 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 19 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """Return True if num/den "digit-cancels" correctly.

    The numerator's last digit must equal the denominator's first digit, and
    naively striking those digits must leave an equal fraction
    (e.g. 49/98 -> 4/8). Assumes two-digit num/den with den % 10 != 0 when the
    final comparison is reached (the caller filters den % 10 == 0).

    NOTE(review): restored from obfuscation — duplicate parameter names were a
    SyntaxError while the body already used ``num``/``den``, and the caller
    uses the name ``is_digit_cancelling``.
    """
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    """Return every non-trivial digit-cancelling fraction "num/den" found.

    Scans numerators from 11 up to 10**digit_len and two-digit denominators
    11..99, keeping fractions that survive the (bogus) digit-cancelling
    simplification. Denominators ending in 0 are excluded as trivial.

    NOTE(review): restored from obfuscation — duplicate parameters, unbound
    locals (``solutions``, ``den``, ``last_digit``) and a call site that passed
    the same argument twice; the quirky inner ``while``/reset-to-10 loop shape
    of the original is preserved.
    """
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1

        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    """Project Euler 33: product of denominators of the digit-cancelling fractions.

    Multiplies the reciprocals of all digit-cancelling fractions with
    ``n``-digit numerators and returns the integer result — the denominator of
    the product in lowest terms.

    NOTE(review): restored from obfuscation — the main guard calls
    ``solution()`` which was never defined (every function here shared the name
    ``lowercase``).
    """
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): restored from obfuscation — both module globals were bound to
# the same recycled name; the logger and archive map follow the standard
# transformers configuration-module layout.
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/s2t-wav2vec2-large-en-de': (
        'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class _a ( snake_case_ ):
    """Configuration for a Speech2Text2 decoder (``speech_to_text_2``).

    Stores the hyper-parameters of the decoder-only model; defaults reproduce
    facebook/s2t-wav2vec2-large-en-de. Follows the PretrainedConfig
    conventions for ``model_type``/``keys_to_ignore_at_inference``/
    ``attribute_map``.

    NOTE(review): restored from obfuscation — the class attributes were all
    named `_UpperCamelCase` (shadowing each other) and `__init__` had duplicate
    parameter names with every `self.x = x` store destroyed; names rebuilt
    from the surviving right-hand sides.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 693 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): restored from obfuscation — the class property below calls
# `logger.info`, but the logger had been bound to a recycled obfuscated name.
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
    """Configuration for Transformer-XL (``transfo-xl``).

    Stores model hyper-parameters including the adaptive-softmax cutoffs;
    defaults reproduce transfo-xl-wt103. Exposes a read-only
    ``max_position_embeddings`` of -1 because the model has no sequence length
    limit.

    NOTE(review): restored from obfuscation — duplicate `__init__` parameters,
    destroyed `self.x = x` stores, and a property/setter pair renamed to
    `_snake_case` (which broke `@max_position_embeddings.setter` at class
    creation); names rebuilt from the surviving right-hand sides.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],  # mutable default kept for interface compatibility; copied below
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.0_1,
        proj_init_std=0.0_1,
        init_std=0.0_2,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # share all adaptive-softmax projections except the first cluster
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( stress , tangential_force , area , ) -> tuple[str, float]:
    """Solve the shear-stress relation tau = F / A for whichever quantity is zero.

    Exactly one of the three arguments must be 0; it is treated as the unknown
    and computed from the other two.

    Returns:
        ("stress" | "tangential_force" | "area", computed_value)

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is
            negative.

    >>> SCREAMING_SNAKE_CASE(stress=25, tangential_force=100, area=0)
    ('area', 4.0)
    >>> SCREAMING_SNAKE_CASE(stress=0, tangential_force=1600, area=200)
    ('stress', 8.0)
    >>> SCREAMING_SNAKE_CASE(stress=1000, tangential_force=0, area=1200)
    ('tangential_force', 1200000)
    """
    # NOTE(review): parameters restored from obfuscation — the original had
    # three identical parameter names (SyntaxError) while the body already
    # used stress/tangential_force/area.
    if (stress, tangential_force, area).count(0 ) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''' )
    elif stress < 0:
        raise ValueError('''Stress cannot be negative''' )
    elif tangential_force < 0:
        raise ValueError('''Tangential Force cannot be negative''' )
    elif area < 0:
        raise ValueError('''Area cannot be negative''' )
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 87 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for the TensorFlow MobileBERT family.

    NOTE(review): restored from obfuscation — both top-level classes shared one
    name (the second shadowed the first), every method was named
    `SCREAMING_SNAKE_CASE` (mutual shadowing, and unittest could not discover
    any `test_*` methods), parameter lists had duplicate names (SyntaxError),
    and `tf.intaa` stood for `tf.int32`. Names rebuilt from the surviving call
    sites (`self.model_tester.create_and_check_mobilebert_model`, etc.).
    """

    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFMobileBertModel,
            'fill-mask': TFMobileBertForMaskedLM,
            'question-answering': TFMobileBertForQuestionAnswering,
            'text-classification': TFMobileBertForSequenceClassification,
            'token-classification': TFMobileBertForTokenClassification,
            'zero-shot': TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # Pretraining models additionally need a next-sentence label.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict

    class TFMobileBertModelTester(object):
        """Builds tiny configs/inputs and checks output shapes for each head."""

        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            # Random ids/masks plus a tiny MobileBertConfig.
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            # Exercise dict, list and bare-tensor calling conventions.
            model = TFMobileBertModel(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            # Tile each input across the choice dimension.
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                'input_ids': multiple_choice_inputs_ids,
                'attention_mask': multiple_choice_input_mask,
                'token_type_ids': multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Network access: downloads the released checkpoint.
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the released google/mobilebert-uncased weights.

    NOTE(review): renamed from the obfuscated `UpperCamelCase_`, which shadowed
    the model-test class of the same name defined above.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30_522]
        self.assertEqual(output.shape, expected_shape)

        # Reference logits for the first 3 positions x first 3 vocab entries.
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 87 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCamelCase(_lowerCamelCase):
    r"""
    OWL-ViT processor: wraps an `OwlViTImageProcessor` and a CLIP tokenizer into
    a single callable that accepts text queries, query images and/or images.
    """

    # ProcessorMixin wires these three class attributes up in __init__; the
    # obfuscated original bound all three values to one name, breaking the mixin.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Back-compat shim: accept the deprecated `feature_extractor` kwarg.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """
        Tokenize `text` (a string, list of strings, or nested list of query
        strings per image) and/or preprocess `images`/`query_images`, returning
        a `BatchEncoding` in the requested tensor framework.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # Concatenate the per-sample encodings along the batch axis in
            # whichever tensor framework was requested.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to `OwlViTImageProcessor.post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to `OwlViTImageProcessor.post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to `OwlViTImageProcessor.post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 712 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCamelCase(unittest.TestCase):
    """Drive `accelerate launch` end-to-end against the bundled test script."""

    # The original collapsed all of these onto one mangled name, so e.g.
    # `mod_file` below was referenced but never bound.
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # Stash any existing user config aside so tests see a clean environment.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # Restore the user's original config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        # Copy before extending: `cmd += [...]` on the class attribute would
        # mutate the shared `base_cmd` list for every other test.
        cmd = list(self.base_cmd)
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_file(self):
        # Launch once per bundled config so a failure names the config file.
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class lowerCamelCase(unittest.TestCase):
    """Check that `accelerate tpu-config --debug` assembles the expected gcloud command."""

    # All of these were collapsed onto one mangled name in the original; the
    # method bodies read them by the names restored here.
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        # --debug prints the gcloud command instead of executing it.
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_with_config_file_and_command(self):
        # An explicit --command overrides the commands from the config file.
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all",
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )
| 501 | 0 |
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowerCAmelCase_(image_size, device):
    """
    Download the BLIP demo image, resize/normalize it, and return it as a
    (1, 3, image_size, image_size) tensor on `device`.

    The original had two parameters with the same mangled name (a SyntaxError)
    and returned an undefined `image`; the caller passes `image_size=`/`device=`
    keywords, which fixes the intended parameter names.
    """
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            # CLIP-style channel mean/std normalization.
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def lowerCAmelCase_(snake_case_):
    """
    Translate a BLIP state-dict key into the HF transformers naming scheme.

    Substitutions are applied cumulatively to a working copy of the key; the
    original discarded every `re.sub` result and returned an undefined `key`.
    """
    key = snake_case_
    if "visual_encoder" in key:
        # NOTE: the trailing `r*` is from the upstream script; it also matches
        # "visual_encode" — kept to preserve upstream behavior.
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def lowerCAmelCase_(pytorch_dump_folder_path, config_path=None):
    """
    Convert the original BLIP captioning/VQA/ITM checkpoints to HF format,
    sanity-check each converted model against known reference outputs, and
    optionally save them under `pytorch_dump_folder_path`.

    The obfuscated original discarded every intermediate into one throwaway
    name and then used undefined names, so nothing actually converted; this
    restores the coherent flow.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    # --- captioning model ---
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    # Reference generations for the demo image.
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # --- VQA model ---
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"

    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    # --- image-text retrieval (ITM) model ---
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    # Reference cosine score and ITM-head match probability.
    assert out[0].item() == 0.21106874942779541
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    # The converter takes (pytorch_dump_folder_path, config_path). The previous
    # call also passed `args.checkpoint_path`, an attribute that argparse never
    # defines (AttributeError) and a third positional argument (TypeError).
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 78 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE(UpperCAmelCase):
    r"""
    TVLT audio feature extractor: converts raw mono waveforms into padded
    log-mel spectrograms (`audio_values`) plus a patch-level attention mask
    (`audio_mask`).

    The obfuscated original declared every __init__ parameter with the same
    name (a SyntaxError) and read names it never bound; this restores the
    names the body actually references.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],  # NOTE: mutable default kept to match upstream API
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute a dB log-mel spectrogram and rescale it to roughly [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, 'hann'),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel='dB',
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into a BatchFeature."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}."""
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 235 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase(A__):
    r"""
    CLAP audio feature extractor: computes (optionally 4-way "fusion")
    log-mel spectrograms from raw audio, truncating or repeat-padding each
    waveform to a fixed number of samples.

    The obfuscated original declared every parameter with the same name
    (a SyntaxError), gave all five methods one shared name, and read names it
    never bound; this restores the names the bodies actually reference.
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1_024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Positive-frequency bins of an fft_window_size-point FFT.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # Two filter banks: HTK-style (used for "fusion") and Slaney-style
        # (used for the non-fused path).
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale='''htk''',
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm='''slaney''',
            mel_scale='''slaney''',
        )

    def to_dict(self):
        """Serializable dict of this instance, minus the (large) mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None):
        """Return a (frames, mel) dB log-mel spectrogram using `mel_filters`."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, '''hann'''),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel='''dB''',
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Stack a downsampled full mel with three random chunks (front/middle/back)."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode='''bilinear''', align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
        """Truncate or pad one waveform and return (mel features, is_longer flag)."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"""data_truncating {truncation} not implemented""")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Featurize one waveform or a batch into `input_features` + `is_longer`."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}."""
                )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.'''
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {'''input_features''': input_mel, '''is_longer''': is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
| 714 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """Download the BLIP demo picture and return it as a preprocessed tensor.

    Bug fix: the previous revision declared two parameters with the same name
    (a SyntaxError) and discarded every intermediate value.  The parameter
    names ``image_size`` / ``device`` are restored from the keyword call site
    (``load_demo_image(image_size=..., device='cpu')``) later in this file.

    Args:
        image_size: target square side length for the resize transform.
        device: torch device the resulting (1, 3, H, W) tensor is moved to.
    """
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            # CLIP-style normalization constants used by the original BLIP repo.
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    # add a batch dimension so the tensor can be fed straight to the model
    return transform(raw_image).unsqueeze(0).to(device)
def rename_key(key):
    """Translate one BLIP (salesforce repo) state-dict key into its HF name.

    Bug fix: the previous revision read an undefined name ``key`` (the
    parameter was mangled) and dropped every ``re.sub`` result, so it raised
    ``NameError`` and never rewrote anything.  Substitutions are now applied
    cumulatively and the function name matches its call site below
    (``renamed_key = rename_key(key)``).

    Args:
        key: original checkpoint key, e.g. ``visual_encoder.blocks.0.attn.proj``.

    Returns:
        The key renamed to the HF Transformers BLIP layout (unchanged when no
        pattern applies).
    """
    if "visual_encoder" in key:
        # NOTE: the trailing '*' makes the final 'r' optional/repeatable;
        # in practice it just matches the literal prefix "visual_encoder".
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Convert the three original BLIP checkpoints (captioning, VQA, ITM) to HF.

    Bug fix: the previous revision declared two parameters with the same name
    (a SyntaxError) and bound every intermediate to a throwaway name while
    later lines read the real names (``config_path``, ``hf_model``,
    ``modified_state_dict`` ...); those real names are restored here.

    Args:
        pytorch_dump_folder_path: where to save the converted models; ``None``
            skips saving and only runs the sanity checks.
        config_path: optional path to an HF ``config.json``; when ``None`` a
            default ``BlipConfig`` is built.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()

    # --- captioning model ---------------------------------------------------
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    # sanity-check generation with and without a prompt
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # --- VQA model ----------------------------------------------------------
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    # --- image-text retrieval (ITM) model -----------------------------------
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35,
    ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    # check both heads: ITM classification head and the raw similarity score
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    # Bug fix: the old call used an undefined function name and passed
    # ``args.checkpoint_path``, which this parser never defines.  The
    # converter takes exactly (pytorch_dump_folder_path, config_path).
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 671 | 0 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Return pi to ``precision`` significant digits via the Chudnovsky series.

    Bug fixes over the previous revision: the type check compared the argument
    against itself, the decimal context precision was never set, and the loop
    read undefined names (``precision``, ``k``).  The function name matches
    the ``pi(n)`` call in the ``__main__`` guard below.

    Args:
        precision: number of significant digits (a natural number).

    Raises:
        TypeError: if ``precision`` is not an int.
        ValueError: if ``precision`` < 1.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    # each Chudnovsky term contributes ~14 digits, so ceil(precision / 14) terms suffice
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # drop the last (possibly rounded) digit
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    # Bug fix: the f-string below reads ``n`` and calls ``pi``, but the old
    # code bound the value to a throwaway name instead of ``n``.
    n = 50
    print(f"""The first {n} digits of pi is: {pi(n)}""")
| 26 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
    """Runs the library's doctests over src/transformers and docs/source.

    NOTE(review): this class is heavily name-mangled.  All five methods share
    the name ``lowercase__`` (only the last binding survives), the first
    method declares every parameter as ``__magic_name__`` (a duplicate-argument
    SyntaxError), and the bodies read the original local names (``files``,
    ``identifier``, ``result``...) that are never bound.  The call sites below
    show the first method was ``analyze_directory(directory, identifier=None,
    n_identifier=None, ignore_files=None, only_modules=True)``; a coordinated
    rename is required before this class can run.
    """
    def lowercase__ ( self : Optional[int] , __magic_name__ : Path , __magic_name__ : Union[str, None] = None , __magic_name__ : Union[List[str], None] = None , __magic_name__ : Union[str, List[str], None] = None , __magic_name__ : bool = True , ) -> Optional[int]:
        """Collect files under a directory, filter them, and doctest each one."""
        # list the regular files in the directory
        __snake_case : Union[str, Any] = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )]
        if identifier is not None:
            # keep only files whose name contains the identifier substring
            __snake_case : List[Any] = [file for file in files if identifier in file]
        if n_identifier is not None:
            # n_identifier excludes files; it may be a single string or a list
            if isinstance(__magic_name__ , __magic_name__ ):
                for n_ in n_identifier:
                    __snake_case : Optional[int] = [file for file in files if n_ not in file]
            else:
                __snake_case : Tuple = [file for file in files if n_identifier not in file]
        __snake_case : Dict = ignore_files or []
        # NOTE(review): mutates the caller's ignore_files list when one is passed.
        ignore_files.append("""__init__.py""" )
        __snake_case : List[str] = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , __magic_name__ )
            if only_modules:
                # import the file as a module and run its DocTestSuite
                __snake_case : List[Any] = file.split(""".""" )[0]
                try:
                    __snake_case : List[Any] = getattr(__magic_name__ , __magic_name__ )
                    __snake_case : Union[str, Any] = doctest.DocTestSuite(__magic_name__ )
                    __snake_case : Dict = unittest.TextTestRunner().run(__magic_name__ )
                    # NOTE(review): ``result`` is never bound -- the runner result
                    # above was assigned to a mangled name.
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''' )
            else:
                # doctest the file textually, relative to the repository root
                __snake_case : Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def lowercase__ ( self : Union[str, Any] ) -> Any:
        """Doctest the modeling files (the two CTRL files excluded)."""
        __snake_case : List[Any] = Path("""src/transformers""" )
        __snake_case : List[Any] = """modeling"""
        __snake_case : Union[str, Any] = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ )
    def lowercase__ ( self : Union[str, Any] ) -> int:
        """Doctest the tokenization files."""
        __snake_case : Union[str, Any] = Path("""src/transformers""" )
        __snake_case : Any = """tokenization"""
        self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
    def lowercase__ ( self : Union[str, Any] ) -> int:
        """Doctest the configuration files."""
        __snake_case : List[Any] = Path("""src/transformers""" )
        __snake_case : List[str] = """configuration"""
        self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
    def lowercase__ ( self : Dict ) -> Dict:
        """Doctest everything except configuration/modeling/tokenization files."""
        __snake_case : Tuple = Path("""src/transformers""" )
        __snake_case : int = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ )
    def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
        """Doctest the documentation sources as whole modules (skip favicon)."""
        __snake_case : int = Path("""docs/source""" )
        __snake_case : Optional[int] = ["""favicon.ico"""]
        self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
| 26 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class __magic_name__ ( PretrainedConfig ):
    """Configuration for XLM models (mangled class name; this is XLMConfig).

    Fixes over the previous revision:
    * the base class is the imported ``PretrainedConfig`` (was the undefined
      name ``__a``);
    * the two class-level attributes were both bound to the same mangled name
      (second clobbered the first) -- restored to the ``model_type`` /
      ``attribute_map`` names ``PretrainedConfig`` reads;
    * ``__init__`` declared every parameter with one duplicate name (a
      SyntaxError) and discarded each value -- parameter and attribute names
      are restored from the names the original body referenced.
    """

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30_145,
        emb_dim=2_048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2_048 ** -0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Store the XLM hyper-parameters; unknown kwargs go to the base class."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class __magic_name__ ( OnnxConfig ):
    """ONNX export configuration (mangled name; presumably the XLM OnnxConfig)."""

    @property
    def inputs(self):
        """Return the ordered dynamic-axes mapping for the model inputs.

        Bug fixes: the axis dict was bound to a throwaway name while the
        ``OrderedDict`` below read the undefined ``dynamic_axis``; the property
        is named ``inputs`` as the ONNX export machinery expects
        (NOTE(review): the previous mangled name was never callable by it).
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )


def solution(n: int = 600_851_475_143) -> int:
    """Project Euler 3: return the largest prime factor of ``n``.

    Bug fixes: the previous revision bound every intermediate to a throwaway
    name and then read the undefined ``n``/``i``; the function name matches
    the ``solution()`` call in the ``__main__`` guard below.

    Raises:
        TypeError: if ``n`` cannot be converted to int.
        ValueError: if ``n`` <= 0.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    # trial division: strip each factor completely before moving on, so
    # every factor found is prime; stop once i*i exceeds the remainder.
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        # the remainder itself is a prime factor larger than sqrt(original n)
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"""{solution() = }""")
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import bootstrap for the FocalNet sub-package.
# Bug fix: both the structure dict and the modeling list were bound to the
# same throwaway name, while _LazyModule below reads ``_import_structure``
# and the module object must be installed via ``sys.modules`` (``sys`` was
# imported but never used).
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: only the configuration objects are exposed
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # real imports for static type checkers only
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # defer the heavy imports until an attribute is actually accessed
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

# Bug fix: the availability flag was bound to a throwaway name while the
# guard below (and the sentence splitter later in this file) read
# ``NLTK_AVAILABLE``.  Pre-download the punkt tokenizer once, under a file
# lock so concurrent workers don't race on the download.
if NLTK_AVAILABLE:
    with FileLock(""".lock""") as lock:
        nltk.download("""punkt""", quiet=True)
def _lowerCAmelCase(_lowerCAmelCase) -> str:
    """Split text into sentences, one per line (for rougeLsum scoring).

    Bug fix: the previous revision called ``re.sub`` and discarded its return
    value -- strings are immutable, so the pegasus ``<n>`` markers were never
    removed.  The cleaned text is now bound and tokenized.

    Args:
        _lowerCAmelCase: the text to split (mangled parameter name kept for
            compatibility; it shadows the function name inside the body).
    """
    cleaned = re.sub('<n>', '', _lowerCAmelCase)  # remove pegasus newline char
    # assert kept (not converted to raise) so callers catching AssertionError still work
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(cleaned))
| 126 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import bootstrap for the BioGPT sub-package.
# Bug fix: the structure dict and the modeling list were bound to the same
# throwaway name, while _LazyModule below reads ``_import_structure`` and the
# module must be installed via ``sys.modules`` (``sys`` was imported unused).
_import_structure = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: only configuration and tokenizer are exposed
    pass
else:
    _import_structure['modeling_biogpt'] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]

if TYPE_CHECKING:
    # real imports for static type checkers only
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # defer the heavy imports until an attribute is actually accessed
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """ConfigTester override for MobileNetV1 (name restored from the
    ``MobileNetVaConfigTester(self, ...)`` call site later in this file).

    Bug fix: the property check called ``hasattr`` on the undefined mangled
    name instead of the freshly built config instance.
    """

    def create_and_test_config_common_properties(self):
        # MobileNetV1Config has no hidden_size/num_attention_heads, so check
        # the attributes it actually defines instead of the generic ones.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class a_ :
    """Builds configs and dummy inputs for the MobileNetV1 model tests.

    NOTE(review): the constructor declares every parameter with the duplicate
    name ``_SCREAMING_SNAKE_CASE`` (a duplicate-argument SyntaxError) and each
    ``UpperCamelCase =`` line discards a value clearly meant for ``self`` --
    the attribute names are recoverable from the readers below
    (``self.batch_size``, ``self.num_channels``, ...).  The method names were
    ``prepare_config_and_inputs`` / ``get_config`` / ``create_and_check_model``
    / ``create_and_check_for_image_classification`` /
    ``prepare_config_and_inputs_for_common`` judging from the call sites in
    the test class below.
    """
    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=0.2_5 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="relu6" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=None , ) -> List[str]:
        """Record the test hyper-parameters (see NOTE on the class)."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = num_channels
        UpperCamelCase = image_size
        UpperCamelCase = depth_multiplier
        UpperCamelCase = min_depth
        UpperCamelCase = tf_padding
        # last hidden size is scaled by the depth multiplier
        UpperCamelCase = int(last_hidden_size * depth_multiplier )
        UpperCamelCase = output_stride
        UpperCamelCase = hidden_act
        UpperCamelCase = classifier_dropout_prob
        UpperCamelCase = use_labels
        UpperCamelCase = is_training
        UpperCamelCase = num_labels
        UpperCamelCase = initializer_range
        UpperCamelCase = scope
    def A__ ( self ) -> int:
        """Build random pixel values plus (optional) labels and a config."""
        UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase = None
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
            UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        UpperCamelCase = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def A__ ( self ) -> Optional[Any]:
        """Construct a MobileNetV1Config from the stored hyper-parameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
        """Run the base model and check the last hidden state's shape."""
        UpperCamelCase = MobileNetVaModel(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
        # spatial dims shrink by the configured output stride
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
        """Run the classification head and check the logits shape."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def A__ ( self ) -> Any:
        """Return (config, inputs_dict) as expected by the common test mixin."""
        UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs
        UpperCamelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
    """Common-test harness for MobileNetV1 models.

    NOTE(review): the bases ``lowerCamelCase`` are undefined mangled names
    (presumably ModelTesterMixin and PipelineTesterMixin -- TODO confirm), and
    the six class attributes are all bound to the same name ``lowercase`` so
    only the last ``False`` survives; the originals were the mixin knobs
    (all_model_classes, pipeline mapping, test_* flags).
    """
    lowercase = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    lowercase = (
        {"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False
    def A__ ( self ) -> int:
        """Set up the model tester and config tester used by the other tests."""
        UpperCamelCase = MobileNetVaModelTester(self )
        UpperCamelCase = MobileNetVaConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> Optional[Any]:
        """Run the shared configuration checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
    def A__ ( self ) -> int:
        """Skipped: the architecture has no inputs_embeds."""
        pass
    @unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
    def A__ ( self ) -> int:
        """Skipped: the architecture has no I/O embeddings."""
        pass
    @unittest.skip(reason="""MobileNetV1 does not output attentions""" )
    def A__ ( self ) -> Dict:
        """Skipped: the architecture emits no attention maps."""
        pass
    def A__ ( self ) -> Union[str, Any]:
        """Check that forward() accepts exactly ``pixel_values`` first."""
        UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
            UpperCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase = [*signature.parameters.keys()]
            UpperCamelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> str:
        """Exercise the base model via the model tester."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> int:
        """Check the number of reported hidden states (26 stages)."""
        def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
            model.to(_SCREAMING_SNAKE_CASE )
            model.eval()
            with torch.no_grad():
                UpperCamelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
            UpperCamelCase = outputs.hidden_states
            # MobileNetV1 exposes 26 intermediate feature maps
            UpperCamelCase = 26
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
        UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = True
            check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCamelCase = True
            check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> str:
        """Exercise the image-classification head via the model tester."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
    @slow
    def A__ ( self ) -> Dict:
        """Smoke-test loading the published pretrained checkpoint."""
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = MobileNetVaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Name restored from the ``prepare_img()`` call site in the integration
    test below (the previous mangled name was never referenced).
    """
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
    """End-to-end integration test for the pretrained MobileNetV1 classifier."""
    @cached_property
    def A__ ( self ) -> Dict:
        """Return the published image processor (or None when vision extras are missing)."""
        return (
            MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
        )
    @slow
    def A__ ( self ) -> Union[str, Any]:
        """Run the pretrained checkpoint on the fixture image and pin its logits."""
        UpperCamelCase = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(_SCREAMING_SNAKE_CASE )
        UpperCamelCase = self.default_image_processor
        UpperCamelCase = prepare_img()
        UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
        # forward pass
        with torch.no_grad():
            UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
        # verify the logits
        # 1001 classes: ImageNet-1k plus the background class
        UpperCamelCase = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
        UpperCamelCase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 35 | 1 |
"""simple docstring"""
from collections.abc import Sequence
# Bug fix: both functions were defined under one shared name with duplicate
# parameter names (a SyntaxError), while the demo below calls
# ``evaluate_poly`` / ``horner`` -- the referenced names are restored.
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(poly[i] * x**i) by direct summation.

    ``poly`` holds the coefficients from the lowest to the highest degree.
    """
    return sum(c * (x ** i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's scheme (no explicit powers)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    # Demo: both evaluation strategies must agree.
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 695 | from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def __lowerCAmelCase ( _A ):
    """Argparse factory: build the download command from parsed args.

    Bug fix: the previous body returned the undefined name ``DownloadCommand``
    and read the undefined ``args`` -- the command class in this file is
    ``_lowercase`` and the parameter is ``_A``.
    """
    return _lowercase(_A.model, _A.cache_dir, _A.force, _A.trust_remote_code)
class _lowercase ( _UpperCAmelCase ):
    """CLI command that pre-downloads a model and its tokenizer into the cache.

    NOTE(review): the names here are mangled -- the base ``_UpperCAmelCase``
    is presumably BaseTransformersCLICommand (imported above; TODO confirm),
    and the first method registers the ``download`` sub-parser.
    """
    @staticmethod
    def _UpperCAmelCase ( UpperCAmelCase ):
        """Register the ``download`` sub-command and its arguments on ``UpperCAmelCase``.

        NOTE(review): the first line binds the sub-parser to a throwaway name
        while the following lines read the unbound ``download_parser``, and
        ``set_defaults(func=UpperCAmelCase)`` points at the parser itself
        instead of the factory function defined above -- this method raises
        NameError / misbehaves as written.
        """
        _lowercase = parser.add_parser("""download""" )
        download_parser.add_argument(
            """--cache-dir""" , type=UpperCAmelCase , default=UpperCAmelCase , help="""Path to location to store the models""" )
        download_parser.add_argument(
            """--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
        download_parser.add_argument(
            """--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
        download_parser.add_argument("""model""" , type=UpperCAmelCase , help="""Name of the model to download""" )
        download_parser.set_defaults(func=UpperCAmelCase )
    def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
        """Store the parsed options: model id, cache dir, force flag, trust flag."""
        _lowercase = model
        _lowercase = cache
        _lowercase = force
        _lowercase = trust_remote_code
        # NOTE(review): the four assignments above bind a throwaway local;
        # run() below reads self._model/_cache/_force/_trust_remote_code,
        # which are never set.
    def _UpperCAmelCase ( self ):
        """Download the model weights and tokenizer so later runs hit the cache."""
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 398 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to exercise the Vivit image processor.

    Fixes over the previous revision: the constructor declared every parameter
    with one duplicate mangled name (a SyntaxError) and discarded each value
    instead of storing it on ``self``.  The class and method names are
    restored from their call sites later in this file
    (``VivitImageProcessingTester(self)`` / ``prepare_image_processor_dict()``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # read-only default, never mutated
        image_std=[0.5, 0.5, 0.5],  # read-only default, never mutated
        crop_size=None,
    ):
        # fall back to the canonical test sizes when none are supplied
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs for constructing a VivitImageProcessor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """
    Tests for `VivitImageProcessor`.

    NOTE(review): the mixin base and the working variable names of this class
    had been mangled to undefined placeholders (`UpperCAmelCase`,
    `_lowerCAmelCase`), so references such as `encoded_videos` and
    `self.image_processor_dict` could never resolve; they are restored here.
    `is_vision_available` and the `require_*` decorators are assumed to be
    imported at the top of the file — TODO confirm.
    """

    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        # Shared fixture producing the processor kwargs and expected sizes.
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes all of its configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        """`from_dict` honours both the dict values and keyword overrides."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        """PIL-frame videos produce (batch, frames, channels, crop_h, crop_w) tensors."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        """Same shape checks for numpy-array frames."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        """Same shape checks for torch-tensor frames."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 710 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """
    Print Pascal's triangle for `num_rows` rows, centred with leading spaces.

    NOTE(review): restored from a placeholder name (`__snake_case`) that
    collided with every other function in this module.
    """
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces so the triangle is centred.
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values, space-separated except after the last element.
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """
    Return the first `num_rows` rows of Pascal's triangle as nested lists.

    Raises:
        TypeError: if `num_rows` is not an int.
        ValueError: if `num_rows` is negative.

    NOTE(review): restored from a placeholder name; the per-row work is inlined
    here (instead of delegating to a helper) so the function is self-contained.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for row_idx in range(num_rows):
        # A row starts and ends with 1; each interior cell is the sum of the
        # two cells above it in the previous row.
        current_row = [1] * (row_idx + 1)
        for col_idx in range(1, row_idx):
            current_row[col_idx] = (
                triangle[row_idx - 1][col_idx - 1] + triangle[row_idx - 1][col_idx]
            )
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """
    Build row `current_row_idx` of Pascal's triangle from the rows already in
    `triangle` and return it.

    NOTE(review): the obfuscated version assigned the boundary 1s and the
    interior sums to a throwaway local instead of the row cells; the in-place
    writes are restored (and the element sum is inlined so the function does
    not depend on a sibling helper).
    """
    current_row = [-1] * (current_row_idx + 1)
    # First and last elements of the current row are always 1.
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        # Each interior element is the sum of the two elements above it.
        current_row[current_col_idx] = (
            triangle[current_row_idx - 1][current_col_idx - 1]
            + triangle[current_row_idx - 1][current_col_idx]
        )
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """
    Set `current_row[current_col_idx]` (in place) to the sum of the two
    elements above it in the previous row of `triangle`.

    NOTE(review): the obfuscated version assigned the sum to a throwaway local,
    never updating the row; the in-place write is restored.
    """
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """
    Return the first `num_rows` rows of Pascal's triangle, computing only the
    first half of each row and mirroring it (rows are symmetric).

    Raises:
        TypeError: if `num_rows` is not an int.
        ValueError: if `num_rows` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        # Pad the previous row with zeros so pairwise sums give the new row.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        # Mirror the first half (dropping the middle element for odd lengths).
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        result.append(row_first_half + row_second_half)
    return result
def benchmark() -> None:
    """
    Time both triangle generators for row counts 0..14 using `timeit`, printing
    one line per (function, value) pair.

    NOTE(review): `timeit` re-imports this module as `__main__`, so the
    functions must be importable under their restored names.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"""{call:38} -- {timing:.4f} seconds""")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    # Run the module's doctests, then the (slow) timeit benchmark.
    import doctest

    doctest.testmod()
    benchmark()
| 491 | 0 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
    """
    Test helper that builds a tiny seq2seq config plus dummy encoder/decoder
    inputs for the UMT5 model tests below.

    NOTE(review): this file was mechanically renamed — every parameter of every
    method is literally named `snake_case` (a duplicate-argument SyntaxError),
    locals are all assigned to `lowercase`, and the bodies then read the
    original descriptive names (`parent`, `input_ids`, `model`, ...) which are
    no longer defined. All methods also share the name `SCREAMING_SNAKE_CASE__`,
    so later defs shadow earlier ones. Code is kept byte-identical here; the
    renames must be reverted against the upstream test file before it can run.
    """

    # Judging by the defaults, the parameters are (parent, vocab_size=99,
    # batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training,
    # use_attention_mask, use_labels, hidden_size=32, num_hidden_layers=5,
    # num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8,
    # dropout_rate, initializer_factor, eos/pad/decoder_start token ids,
    # scope, decoder_layers) — TODO confirm against upstream before renaming.
    def __init__( self , snake_case , snake_case=99 , snake_case=13 , snake_case=7 , snake_case=9 , snake_case=True , snake_case=True , snake_case=False , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case=8 , snake_case=0.1 , snake_case=0.002 , snake_case=1 , snake_case=0 , snake_case=0 , snake_case=None , snake_case=None , ):
        lowercase = parent
        lowercase = batch_size
        lowercase = encoder_seq_length
        lowercase = decoder_seq_length
        # For common tests
        lowercase = self.decoder_seq_length
        lowercase = is_training
        lowercase = use_attention_mask
        lowercase = use_labels
        lowercase = vocab_size
        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = d_ff
        lowercase = relative_attention_num_buckets
        lowercase = dropout_rate
        lowercase = initializer_factor
        lowercase = eos_token_id
        lowercase = pad_token_id
        lowercase = decoder_start_token_id
        lowercase = None
        lowercase = decoder_layers

    # Loads a full-size pretrained config (requires network access).
    def SCREAMING_SNAKE_CASE__ ( self ):
        return TaConfig.from_pretrained('google/umt5-base' )

    # Fills in default attention/head masks, then packs everything into the
    # kwargs dict consumed by the model's forward().
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , ):
        if attention_mask is None:
            lowercase = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            lowercase = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            lowercase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=snake_case )
        if decoder_head_mask is None:
            lowercase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=snake_case )
        if cross_attn_head_mask is None:
            lowercase = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=snake_case )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    # Builds random token ids (clamped away from the pad id) and the matching
    # config + input dict.
    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        lowercase = input_ids.clamp(self.pad_token_id + 1 )
        lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
        lowercase = self.get_config()
        lowercase = config.num_attention_heads
        lowercase = self.prepare_inputs_dict(snake_case , snake_case , snake_case )
        return config, input_dict

    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase , lowercase = self.prepare_config_and_inputs()
        return config, inputs_dict

    # Pipeline-test config variant (fixed vocab_size=166).
    def SCREAMING_SNAKE_CASE__ ( self ):
        return TaConfig(
            vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )

    def SCREAMING_SNAKE_CASE__ ( self ):
        return TaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )

    # Runs the full encoder-decoder model and checks output shapes and the
    # structure of the cached key/values.
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
        lowercase = UMTaModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        lowercase = model(
            input_ids=snake_case , decoder_input_ids=snake_case , attention_mask=snake_case , decoder_attention_mask=snake_case , )
        lowercase = model(input_ids=snake_case , decoder_input_ids=snake_case )
        lowercase = result.last_hidden_state
        lowercase = result.past_key_values
        lowercase = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(snake_case ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )

    # Checks that decoding one token with a cache matches full re-decoding.
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
        lowercase = UMTaModel(config=snake_case ).get_decoder().to(snake_case ).eval()
        # first forward pass
        lowercase = model(snake_case , use_cache=snake_case )
        lowercase = model(snake_case )
        lowercase = model(snake_case , use_cache=snake_case )
        self.parent.assertTrue(len(snake_case ) == len(snake_case ) )
        self.parent.assertTrue(len(snake_case ) == len(snake_case ) + 1 )
        lowercase , lowercase = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowercase = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowercase = model(snake_case )['last_hidden_state']
        lowercase = model(snake_case , past_key_values=snake_case )['last_hidden_state']
        # select random slice
        lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
        lowercase = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-3 ) )

    # fp16 smoke test: forward pass in half precision must not produce NaNs.
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , ):
        lowercase = UMTaModel(config=snake_case ).to(snake_case ).half().eval()
        lowercase = model(**snake_case )['last_hidden_state']
        self.parent.assertFalse(torch.isnan(snake_case ).any().item() )
@require_torch
# NOTE(review): the three mixin bases were mangled to the undefined name
# `__lowerCamelCase` (upstream these are ModelTesterMixin, GenerationTesterMixin
# and PipelineTesterMixin — TODO confirm), and this class re-uses the name `A_`
# already taken by the tester above, clobbering it at module level.
class A_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """
    Model/pipeline test-suite wiring for UMT5.

    NOTE(review): every class attribute below is literally named
    `_UpperCamelCase`, so each assignment clobbers the previous one — the
    distinct upstream names (all_model_classes, pipeline_model_mapping, flags,
    model_split_percents, ...) must be restored before these settings can take
    effect. Method bodies likewise read names (`config_and_inputs`, `model`,
    `head_masking`, `attention_names`, `out`, `attn_weights`) that are never
    bound because locals were renamed to `lowercase`.
    """
    _UpperCamelCase : Any = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    _UpperCamelCase : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    _UpperCamelCase : Tuple = (
        {
            """conversational""": UMTaForConditionalGeneration,
            """feature-extraction""": UMTaModel,
            """summarization""": UMTaForConditionalGeneration,
            """text2text-generation""": UMTaForConditionalGeneration,
            """translation""": UMTaForConditionalGeneration,
            """question-answering""": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    _UpperCamelCase : List[Any] = True
    _UpperCamelCase : Optional[int] = False
    _UpperCamelCase : Union[str, Any] = False
    _UpperCamelCase : Tuple = True
    _UpperCamelCase : Optional[Any] = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    _UpperCamelCase : str = [0.8, 0.9]

    # setUp: builds the shared model tester fixture.
    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = UMTaModelTester(self )

    # Exports the model to ONNX into a temp dir (skipped: segfaults on torch 1.8.0).
    @unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = self.model_tester.prepare_config_and_inputs()
        lowercase = UMTaModel(config_and_inputs[0] ).to(snake_case )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=snake_case , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )

    # Half-precision forward smoke test (GPU only).
    @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*snake_case )

    # Checks that fully-zero head masks zero out the corresponding attentions
    # during generation.
    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        lowercase = self.model_tester.prepare_config_and_inputs()
        lowercase = config_and_inputs[0]
        lowercase = UMTaForConditionalGeneration(snake_case ).eval()
        model.to(snake_case )
        lowercase = {
            'head_mask': torch.zeros(config.num_layers , config.num_heads , device=snake_case ),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case ),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case ),
        }
        for attn_name, (name, mask) in zip(snake_case , head_masking.items() ):
            lowercase = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                lowercase = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=snake_case )
            lowercase = model.generate(
                config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=snake_case , return_dict_in_generate=snake_case , **snake_case , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )

    @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
# NOTE(review): re-uses the class name `A_` a third time, clobbering the two
# classes above at module level.
class A_ ( unittest.TestCase ):
    """
    Slow integration test: tokenizes a fixed batch of `<extra_id_*>` prompts
    with google/umt5-small, compares the token ids to a hard-coded tensor, then
    generates and compares the decoded strings to hard-coded expectations.

    NOTE(review): locals were renamed to `lowercase`, so references such as
    `tokenizer`, `input_ids` and `model` below are undefined until the
    upstream names are restored.
    """
    @slow
    @unittest.skip(
        'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=snake_case ).to(snake_case )
        lowercase = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=snake_case , legacy=snake_case )
        lowercase = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        lowercase = tokenizer(snake_case , return_tensors='pt' , padding=snake_case ).input_ids
        # fmt: off
        lowercase = torch.tensor(
            [
                [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(snake_case , snake_case )
        lowercase = model.generate(input_ids.to(snake_case ) )
        lowercase = [
            '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
            '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
        ]
        lowercase = tokenizer.batch_decode(snake_case )
        self.assertEqual(snake_case , snake_case )
| 84 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
A = logging.get_logger(__name__)
# NOTE(review): `A` is immediately rebound below, clobbering the logger — these
# two module globals evidently had distinct upstream names (a logger and the
# pretrained-config archive map); restore them before relying on either.
A = {
    'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
    'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
    'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
    'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
    'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
    'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    """
    Configuration class for BLOOM models.

    NOTE(review): restored from an obfuscated form whose base class
    (`SCREAMING_SNAKE_CASE__`) was undefined — `PretrainedConfig` is imported
    at the top of this file — and whose `__init__` repeated the parameter name
    `_lowercase` (a SyntaxError) while the body read the real names below.
    The three class attributes had all been collapsed onto `__magic_name__`.

    Args (selected):
        vocab_size: size of the token vocabulary.
        hidden_size: model embedding dimension (`n_embed` kwarg also accepted).
        n_layer / n_head: transformer depth and attention-head count.
        pretraining_tp: tensor-parallel degree used at pretraining time,
            needed by `slow_but_exact` to reproduce exact logits.
    """

    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    """
    ONNX export configuration for BLOOM.

    NOTE(review): restored from an obfuscated form — the base class was the
    undefined `SCREAMING_SNAKE_CASE__` (should be the `OnnxConfigWithPast`
    imported above) and all parameters were duplicates of `_lowercase`
    (a SyntaxError) while the bodies read the descriptive names below.
    """

    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the exported graph's inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating ONNX outputs against PyTorch.
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs (optionally with zeroed past key/values) for tracing."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                # BLOOM flattens (batch, heads) into one axis for past tensors.
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask to also cover the (zeroed) past positions.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 449 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
# Module-level RNG shared by floats_list when no explicit rng is given.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """
    Return a `shape[0] x shape[1]` nested list of random floats in [0, scale).

    NOTE(review): restored from an obfuscated form whose name (`A`) did not
    match its caller (`floats_list` in the tester below) and whose parameters
    were all duplicates of `__UpperCamelCase` (a SyntaxError). `name` is
    accepted but unused, matching the original signature.
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    """
    Fixture that holds constructor kwargs and generates variable-length audio
    inputs for `TvltFeatureExtractor` tests.

    NOTE(review): restored from the placeholder name `__lowerCAmelCase`
    (the test class below instantiates `TvltFeatureExtractionTester`) and
    from an `__init__` whose parameters were all duplicates of `_snake_case`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive input lengths when generating a batch of
        # increasing-size inputs.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        """Return the kwargs dict used to instantiate the feature extractor under test."""
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Generate a batch of random audio inputs (lists, or numpy arrays if `numpify`)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
# NOTE(review): the mixin base was mangled to the undefined `UpperCAmelCase_`
# (upstream this is the SequenceFeatureExtractionTestMixin imported above —
# TODO confirm) and every method below shares the placeholder name `_a`, so
# later defs shadow earlier ones. Method bodies assign locals to `A__` and then
# read the original names (`feat_extract_first`, `encoded_audios`, ...), which
# are undefined until the upstream names are restored. Code kept byte-identical.
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
    """Feature-extraction test-suite for `TvltFeatureExtractor`."""
    A__ : Optional[Any] = TvltFeatureExtractor

    def _a ( self : str ):
        """setUp: builds the shared tester fixture."""
        A__ = TvltFeatureExtractionTester(self )

    def _a ( self : Tuple ):
        """The extractor exposes all of its configuration attributes."""
        A__ = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(_snake_case , 'spectrogram_length' ) )
        self.assertTrue(hasattr(_snake_case , 'feature_size' ) )
        self.assertTrue(hasattr(_snake_case , 'num_audio_channels' ) )
        self.assertTrue(hasattr(_snake_case , 'hop_length' ) )
        self.assertTrue(hasattr(_snake_case , 'chunk_length' ) )
        self.assertTrue(hasattr(_snake_case , 'sampling_rate' ) )

    def _a ( self : str ):
        """save_pretrained -> from_pretrained round-trip preserves the config."""
        A__ = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            A__ = feat_extract_first.save_pretrained(_snake_case )[0]
            check_json_file_has_correct_format(_snake_case )
            A__ = self.feature_extraction_class.from_pretrained(_snake_case )
        A__ = feat_extract_first.to_dict()
        A__ = feat_extract_second.to_dict()
        # mel_filters is a float array; compare it with allclose, the rest exactly.
        A__ = dict_first.pop('mel_filters' )
        A__ = dict_second.pop('mel_filters' )
        self.assertTrue(np.allclose(_snake_case , _snake_case ) )
        self.assertEqual(_snake_case , _snake_case )

    def _a ( self : Union[str, Any] ):
        """to_json_file -> from_json_file round-trip preserves the config."""
        A__ = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            A__ = os.path.join(_snake_case , 'feat_extract.json' )
            feat_extract_first.to_json_file(_snake_case )
            A__ = self.feature_extraction_class.from_json_file(_snake_case )
        A__ = feat_extract_first.to_dict()
        A__ = feat_extract_second.to_dict()
        A__ = dict_first.pop('mel_filters' )
        A__ = dict_second.pop('mel_filters' )
        self.assertTrue(np.allclose(_snake_case , _snake_case ) )
        self.assertEqual(_snake_case , _snake_case )

    def _a ( self : Dict ):
        """__call__ handles unbatched, batched, masked and stacked-2D inputs."""
        A__ = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        A__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        A__ = [np.asarray(_snake_case ) for speech_input in speech_inputs]
        # Test not batched input
        A__ = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        A__ = feature_extractor(_snake_case , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        A__ = feature_extractor(
            _snake_case , return_tensors='np' , sampling_rate=4_41_00 , mask_audio=_snake_case ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        A__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
        A__ = np.asarray(_snake_case )
        A__ = feature_extractor(_snake_case , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

    def _a ( self : List[str] , _snake_case : Tuple ):
        """Load a few raw audio arrays from the dummy LibriSpeech dataset (network)."""
        A__ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        A__ = ds.sort('id' ).select(range(_snake_case ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]

    def _a ( self : Dict ):
        """Integration check against hard-coded expected spectrogram values."""
        A__ = self._load_datasamples(1 )
        A__ = TvltFeatureExtractor()
        A__ = feature_extractor(_snake_case , return_tensors='pt' ).audio_values
        self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) )
        A__ = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _snake_case , atol=1E-4 ) )
| 52 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase ) -> YolosConfig:
A__ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
A__ = 192
A__ = 768
A__ = 12
A__ = 3
A__ = [800, 1_333]
A__ = False
elif yolos_name == "yolos_s_dWr":
A__ = 330
A__ = 14
A__ = 6
A__ = 1_320
elif "yolos_s" in yolos_name:
A__ = 384
A__ = 1_536
A__ = 12
A__ = 6
elif "yolos_b" in yolos_name:
A__ = [800, 1_344]
A__ = 91
A__ = 'huggingface/label-files'
A__ = 'coco-detection-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
return config
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: config.hidden_size, :]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[-config.hidden_size :, :]
A__ = in_proj_bias[-config.hidden_size :]
def A ( __UpperCamelCase ) -> str:
if "backbone" in name:
A__ = name.replace('backbone' , 'vit' )
if "cls_token" in name:
A__ = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
A__ = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
A__ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
A__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A__ = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
A__ = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
A__ = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
A__ = name.replace('vit.norm' , 'vit.layernorm' )
return name
def A ( __UpperCamelCase , __UpperCamelCase ) -> dict:
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(__UpperCamelCase )
if "qkv" in key:
A__ = key.split('.' )
A__ = int(key_split[2] )
A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[
dim : dim * 2, :
]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = val
return orig_state_dict
def A ( ) -> torch.Tensor:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]:
A__ = get_yolos_config(__UpperCamelCase )
# load original state_dict
A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model']
# load 🤗 model
A__ = YolosForObjectDetection(__UpperCamelCase )
model.eval()
A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
A__ = 800 if yolos_name != 'yolos_ti' else 512
A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase )
A__ = image_processor(images=prepare_img() , return_tensors='pt' )
A__ = model(**__UpperCamelCase )
A__ , A__ = outputs.logits, outputs.pred_boxes
A__ , A__ = None, None
if yolos_name == "yolos_ti":
A__ = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
A__ = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
A__ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
A__ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
A__ = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
A__ = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
A__ = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
A__ = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
A__ = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
A__ = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
A__ = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
A__ = model_mapping[yolos_name]
image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' )
model.push_to_hub(__UpperCamelCase , organization='hustvl' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( __snake_case : List[str] , __snake_case : Any ) -> Optional[int]:
lowercase : Dict = set(UpperCamelCase__ ), [start]
while stack:
lowercase : Any = stack.pop()
explored.add(UpperCamelCase__ )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(UpperCamelCase__ )
return explored
_A : Optional[int] = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
| 361 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _snake_case ( a__ ):
lowerCAmelCase :UNetaDModel
lowerCAmelCase :ScoreSdeVeScheduler
def __init__( self , _lowerCamelCase , _lowerCamelCase):
super().__init__()
self.register_modules(unet=_lowerCamelCase , scheduler=_lowerCamelCase)
@torch.no_grad()
def __call__( self , _lowerCamelCase = 1 , _lowerCamelCase = 2000 , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , **_lowerCamelCase , ):
UpperCAmelCase__ : Union[str, Any] = self.unet.config.sample_size
UpperCAmelCase__ : Any = (batch_size, 3, img_size, img_size)
UpperCAmelCase__ : Optional[int] = self.unet
UpperCAmelCase__ : Any = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase) * self.scheduler.init_noise_sigma
UpperCAmelCase__ : Optional[int] = sample.to(self.device)
self.scheduler.set_timesteps(_lowerCamelCase)
self.scheduler.set_sigmas(_lowerCamelCase)
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
UpperCAmelCase__ : List[str] = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device)
# correction step
for _ in range(self.scheduler.config.correct_steps):
UpperCAmelCase__ : List[str] = self.unet(_lowerCamelCase , _lowerCamelCase).sample
UpperCAmelCase__ : List[Any] = self.scheduler.step_correct(_lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase).prev_sample
# prediction step
UpperCAmelCase__ : Any = model(_lowerCamelCase , _lowerCamelCase).sample
UpperCAmelCase__ : Union[str, Any] = self.scheduler.step_pred(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase)
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = output.prev_sample, output.prev_sample_mean
UpperCAmelCase__ : Optional[Any] = sample_mean.clamp(0 , 1)
UpperCAmelCase__ : List[str] = sample.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
UpperCAmelCase__ : str = self.numpy_to_pil(_lowerCamelCase)
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_lowerCamelCase) | 407 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
A__: List[str] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
A__: Optional[Any] = TaTokenizerFast
A__: Union[str, Any] = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__: Optional[Any] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__: Optional[Any] = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__: List[str] = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
A__: List[str] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 721 |
"""Optimal merge pattern: minimum cost of merging a list of file sizes."""


def SCREAMING_SNAKE_CASE_(files: list) -> float:
    """Return the minimum total cost of merging all ``files`` into one file.

    Classic optimal-merge-pattern greedy: repeatedly merge the two smallest
    files; each merge costs the sum of the two sizes. ``files`` is a list of
    file sizes and is consumed (mutated) in place. An empty or single-element
    list costs 0.

    Bug fix vs. the previous revision: the parameter had been renamed to
    ``_UpperCAmelCase`` while the body still read the undefined name ``files``,
    and loop results were bound to ``_a`` while ``min_index``/``temp`` were read.

    >>> SCREAMING_SNAKE_CASE_([2, 3, 4])
    14
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 506 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_ )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
_lowerCamelCase =field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
_lowerCamelCase =Features({"text": Value("string" )} )
_lowerCamelCase =Features({"labels": ClassLabel} )
_lowerCamelCase ="text"
_lowerCamelCase ="labels"
def __snake_case ( self : str , a__ : str ):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , a__ ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
UpperCAmelCase = copy.deepcopy(self )
UpperCAmelCase = self.label_schema.copy()
UpperCAmelCase = features[self.label_column]
UpperCAmelCase = label_schema
return task_template
@property
def __snake_case ( self : Dict ):
return {
self.text_column: "text",
self.label_column: "labels",
}
| 51 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def UpperCamelCase ( snake_case__ : List[Any] ,snake_case__ : List[str] ):
'''simple docstring'''
__snake_case :int = []
for part_id in partition_order:
__snake_case :int = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(snake_case__ ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def UpperCamelCase ( ):
'''simple docstring'''
__snake_case :List[str] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
__snake_case :Any = spark.range(100 ).repartition(1 )
__snake_case :int = Spark(snake_case__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def UpperCamelCase ( ):
'''simple docstring'''
__snake_case :Tuple = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
__snake_case :int = spark.range(10 ).repartition(2 )
__snake_case :str = [1, 0]
__snake_case :List[Any] = _generate_iterable_examples(snake_case__ ,snake_case__ ) # Reverse the partitions.
__snake_case :Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ ,snake_case__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case :Union[str, Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCamelCase ( ):
'''simple docstring'''
__snake_case :Union[str, Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
__snake_case :Tuple = spark.range(10 ).repartition(1 )
__snake_case :Dict = SparkExamplesIterable(snake_case__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case__ ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def UpperCamelCase ( ):
'''simple docstring'''
__snake_case :List[str] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
__snake_case :Union[str, Any] = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
__snake_case :Dict = lambda snake_case__ : x.reverse()
__snake_case :int = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ ,[2, 1, 0] )
__snake_case :Dict = SparkExamplesIterable(snake_case__ ).shuffle_data_sources(snake_case__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case__ ):
__snake_case , __snake_case :List[str] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCamelCase ( ):
'''simple docstring'''
__snake_case :Union[str, Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
__snake_case :Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case :List[Any] = SparkExamplesIterable(snake_case__ ).shard_data_sources(worker_id=0 ,num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case :Dict = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ ,[0, 2] )
for i, (row_id, row_dict) in enumerate(snake_case__ ):
__snake_case , __snake_case :Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case :str = SparkExamplesIterable(snake_case__ ).shard_data_sources(worker_id=1 ,num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case :Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ ,[1, 3] )
for i, (row_id, row_dict) in enumerate(snake_case__ ):
__snake_case , __snake_case :Dict = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCamelCase ( ):
'''simple docstring'''
__snake_case :Union[str, Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
__snake_case :Tuple = spark.range(100 ).repartition(1 )
__snake_case :Dict = Spark(snake_case__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 455 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCamelCase :
'''simple docstring'''
@staticmethod
def snake_case__ ( *__lowercase , **__lowercase ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case__ ( self , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
__A : Union[str, Any] = ObjectDetectionPipeline(model=__lowercase , image_processor=__lowercase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case__ ( self , __lowercase , __lowercase ):
"""simple docstring"""
__A : List[str] = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 )
self.assertGreater(len(__lowercase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__lowercase , {
'score': ANY(__lowercase ),
'label': ANY(__lowercase ),
'box': {'xmin': ANY(__lowercase ), 'ymin': ANY(__lowercase ), 'xmax': ANY(__lowercase ), 'ymax': ANY(__lowercase )},
} , )
import datasets
__A : Optional[int] = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__A : str = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__A : Optional[Any] = object_detector(__lowercase , threshold=0.0 )
self.assertEqual(len(__lowercase ) , len(__lowercase ) )
for outputs in batch_outputs:
self.assertGreater(len(__lowercase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__lowercase , {
'score': ANY(__lowercase ),
'label': ANY(__lowercase ),
'box': {'xmin': ANY(__lowercase ), 'ymin': ANY(__lowercase ), 'xmax': ANY(__lowercase ), 'ymax': ANY(__lowercase )},
} , )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case__ ( self ):
"""simple docstring"""
pass
@require_torch
def snake_case__ ( self ):
"""simple docstring"""
__A : List[str] = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__A : List[str] = AutoModelForObjectDetection.from_pretrained(__lowercase )
__A : List[str] = AutoFeatureExtractor.from_pretrained(__lowercase )
__A : Optional[int] = ObjectDetectionPipeline(model=__lowercase , feature_extractor=__lowercase )
__A : Union[str, Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] , )
__A : Union[str, Any] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] , )
@require_torch
@slow
def snake_case__ ( self ):
"""simple docstring"""
__A : List[Any] = 'facebook/detr-resnet-50'
__A : List[str] = AutoModelForObjectDetection.from_pretrained(__lowercase )
__A : int = AutoFeatureExtractor.from_pretrained(__lowercase )
__A : Any = ObjectDetectionPipeline(model=__lowercase , feature_extractor=__lowercase )
__A : Dict = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
__A : Optional[Any] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def snake_case__ ( self ):
"""simple docstring"""
__A : Tuple = 'facebook/detr-resnet-50'
__A : List[Any] = pipeline('object-detection' , model=__lowercase )
__A : Any = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
__A : Optional[int] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def snake_case__ ( self ):
"""simple docstring"""
__A : str = 0.9_9_8_5
__A : Union[str, Any] = 'facebook/detr-resnet-50'
__A : str = pipeline('object-detection' , model=__lowercase )
__A : int = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=__lowercase )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
@require_torch
@require_pytesseract
@slow
def snake_case__ ( self ):
"""simple docstring"""
__A : Union[str, Any] = 'Narsil/layoutlmv3-finetuned-funsd'
__A : Optional[Any] = 0.9_9_9_3
__A : str = pipeline('object-detection' , model=__lowercase , threshold=__lowercase )
__A : List[str] = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] , )
| 540 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _lowercase(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
    """Convert a TensorFlow TAPAS checkpoint into a PyTorch model + tokenizer dump.

    The original signature repeated one parameter name five times (a SyntaxError)
    and the `config.*` attribute assignments were lost; both are restored here.
    NOTE(review): the hparam attribute names below are reconstructed from the
    upstream TAPAS conversion script — confirm against `TapasConfig`.
    """
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"""Task {task} not supported.""")

    print(f"""Building PyTorch model from configuration: {config}""")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""")
    # The checkpoint path ends in "model.ckpt" (10 chars); vocab.txt sits next to it.
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt', model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print('Used relative position embeddings:', model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    # CLI entry point: parse flags and run the TAPAS TF->PyTorch conversion above.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
    )
    parser.add_argument(
        '--reset_position_index_per_cell',
        default=False,
        action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to True.',
    )
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--tapas_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained TAPAS model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    # The converter is defined above under the (obfuscated) name `_lowercase`.
    _lowercase(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
# Alphabet used by the Vigenère cipher helpers below; they reference it as LETTERS.
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main() -> None:
    """Interactively encrypt or decrypt a message with the Vigenère cipher.

    Renamed from a colliding obfuscated name so the `__main__` guard's
    `main()` call resolves.
    """
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')

    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)

    print(F'''\n{mode.title()}ed message:''')
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    """Encrypt *message* with *key* using the Vigenère cipher."""
    return translate_message(key, message, 'encrypt')
def decrypt_message(key: str, message: str) -> str:
    """Decrypt *message* with *key* using the Vigenère cipher."""
    return translate_message(key, message, 'decrypt')
def translate_message(key: str, message: str, mode: str) -> str:
    """Encrypt or decrypt *message* with *key*; *mode* is 'encrypt' or 'decrypt'.

    Non-alphabetic characters pass through unchanged; letter case is preserved.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)  # wrap around the alphabet

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # advance the key, looping back to the start when exhausted
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
    main()
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __lowercase(unittest.TestCase):
    """Sanity checks for `get_activation`: correct module type and saturation behaviour.

    Methods were renamed from a single colliding name to `test_*` so unittest
    actually discovers and runs all four.
    NOTE(review): the original dtype was obfuscated ("floataa"); float32 assumed — confirm.
    """

    def test_swish(self):
        act = get_activation("""swish""")
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-1_00, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("""silu""")
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-1_00, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("""mish""")
        self.assertIsInstance(act, nn.Mish)

        # Mish only underflows to exactly 0 for much larger negative inputs.
        self.assertEqual(act(torch.tensor(-2_00, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("""gelu""")
        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-1_00, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Pretrained checkpoint -> hosted config file. Previously this dict was bound to
# the same name as the logger above, silently overwriting it.
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class _lowerCamelCase(PretrainedConfig):
    """Configuration for RoBERTa-PreLayerNorm models (`model_type` "roberta-prelayernorm").

    The original base class reference (`_a`) was undefined and the `__init__`
    signature repeated one parameter name 18 times (a SyntaxError); both are
    restored from the assignments in the body.
    """

    model_type = """roberta-prelayernorm"""

    def __init__(
        self,
        vocab_size=5_02_65,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowerCamelCaseOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoBERTa-PreLayerNorm.

    Renamed from a duplicate `_lowerCamelCase` that shadowed the model config
    above; the property is named `inputs` as required by the `OnnxConfig` API.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
from __future__ import annotations
# Type aliases for rotor settings.
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
# Symmetric letter pairs: mapping a letter twice returns the original.
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Validate rotor positions/selection and build the plugboard mapping.

    Raises Exception for fewer than 3 unique rotors and ValueError for a
    position outside 1..26. Renamed to `_validator`, the name `enigma` calls.
    """
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f'''Please use 3 unique rotors (not {unique_rotsel})'''
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        # (closing parenthesis restored in the message below)
        msg = f'''First rotor position is not within range of 1..26 ({rotorpos1})'''
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f'''Second rotor position is not within range of 1..26 ({rotorpos2})'''
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f'''Third rotor position is not within range of 1..26 ({rotorpos3})'''
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Turn a plugboard string like "AB CD" into a symmetric letter mapping.

    Renamed to `_plugboard`, the name `_validator` calls. The original
    computed `pbstring.replace(...)` and discarded the result; the assignment
    is restored so spaces are actually stripped.
    """
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f'''Plugboard setting isn't type string ({type(pbstring)})'''
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f'''Odd number of symbols ({len(pbstring)})'''
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(' ', '')

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f'''\'{i}\' not in list of symbols'''
            raise Exception(msg)
        elif i in tmppbl:
            msg = f'''Duplicate symbol ({i})'''
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encipher/decipher *text* with this Enigma-style machine.

    The machine is self-reciprocal: feeding the output back in with the same
    settings returns the original message. The three collapsed position/rotor
    variables from the obfuscated source are restored as distinct names.
    """
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    # Positions are given 1-based; internal indexing is 0-based.
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    # Demo: encrypt then decrypt with the same settings (self-reciprocal).
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    # NOTE(review): the original rotor choice was obfuscated; any 3 unique
    # rotors are valid — confirm (rotor2, rotor4, rotor8) against upstream.
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
import datasets
from .evaluate import evaluate
lowerCAmelCase = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
lowerCAmelCase = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
lowerCAmelCase = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    """SQuAD v1 metric (exact match + F1) via the official evaluation script.

    Methods renamed from a colliding `_A` to the `_info`/`_compute` hook names
    the `datasets.Metric` API dispatches on.
    """

    def _info(self):
        # Declares the feature schema and metadata for this metric.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': {'id': datasets.Value('string'), 'prediction_text': datasets.Value('string')},
                    'references': {
                        'id': datasets.Value('string'),
                        'answers': datasets.features.Sequence(
                            {
                                'text': datasets.Value('string'),
                                'answer_start': datasets.Value('int32'),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'],
            reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'],
        )

    def _compute(self, predictions, references):
        # Re-shape the flat reference list into the nested SQuAD dataset layout
        # that the official `evaluate` script expects.
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo, pytorch_dump_folder_path) -> None:
    '''Convert the official RoBERTa-PreLayerNorm dump into a transformers checkpoint.

    Renamed to match the call in the `__main__` block below; the original
    signature repeated one parameter name twice (a SyntaxError).
    '''
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin' ) )
    new_state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith('roberta.' ):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
            continue

        new_state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=new_state_dict )
    model.save_pretrained(pytorch_dump_folder_path )

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point for the RoBERTa-PreLayerNorm conversion above.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint-repo',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    # argparse derives the dest `checkpoint_repo` from `--checkpoint-repo`.
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# Pretrained checkpoint -> hosted config file. Previously bound to the same
# name as the logger above (which the config class below relies on).
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/conditional-detr-resnet-50''': (
        '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
    ),
}
class UpperCamelCase ( PretrainedConfig ):
    """Configuration for Conditional DETR (`model_type` "conditional_detr").

    The original base class reference was undefined and the `__init__`
    signature repeated a single parameter name 34 times (a SyntaxError);
    parameter names are restored from the attribute assignments in the body.
    """

    model_type = '''conditional_detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ) -> None:
        # A timm backbone and an explicit backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config, dict):
                # Re-hydrate a plain dict into the proper backbone config class.
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )

    @property
    def num_attention_heads(self) -> int:
        # Alias required by `attribute_map` above.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Alias required by `attribute_map` above.
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class UpperCamelCaseOnnxConfig ( OnnxConfig ):
    """ONNX export configuration for Conditional DETR.

    Renamed from a duplicate `UpperCamelCase` that shadowed the model config
    above; property names follow the `OnnxConfig` API.
    """

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating ONNX vs PyTorch outputs.
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a PyTorch model dump.

    Renamed to match the call in the `__main__` block below; the original
    signature repeated one parameter name three times (a SyntaxError).
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point for the T5 TF->PyTorch conversion above.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase_ : List[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class _lowerCamelCase ( unittest.TestCase ):
__a = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__a = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__a = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__a = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: str= ZeroShotClassificationPipeline(
model=lowerCAmelCase , tokenizer=lowerCAmelCase , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__: List[Any]= classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase )]} )
# No kwarg
SCREAMING_SNAKE_CASE__: int= classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase )]} )
SCREAMING_SNAKE_CASE__: List[Any]= classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase )]} )
SCREAMING_SNAKE_CASE__: Dict= classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
SCREAMING_SNAKE_CASE__: Optional[Any]= classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
SCREAMING_SNAKE_CASE__: Optional[Any]= classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(lowerCAmelCase , {'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
SCREAMING_SNAKE_CASE__: str= classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
lowerCAmelCase , [
{'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )]}
for i in range(1 )
] , )
SCREAMING_SNAKE_CASE__: Tuple= classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
lowerCAmelCase , [
{'''sequence''': ANY(lowerCAmelCase ), '''labels''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )], '''scores''': [ANY(lowerCAmelCase ), ANY(lowerCAmelCase )]}
for i in range(2 )
] , )
with self.assertRaises(lowerCAmelCase ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(lowerCAmelCase ):
classifier(lowerCAmelCase , candidate_labels='''politics''' )
with self.assertRaises(lowerCAmelCase ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(lowerCAmelCase ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=lowerCAmelCase )
with self.assertRaises(lowerCAmelCase ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(lowerCAmelCase ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=lowerCAmelCase , )
self.run_entailment_id(lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[Any]= zero_shot_classifier.model.config
SCREAMING_SNAKE_CASE__: str= config.labelaid
SCREAMING_SNAKE_CASE__: int= zero_shot_classifier.entailment_id
SCREAMING_SNAKE_CASE__: str= {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
SCREAMING_SNAKE_CASE__: Any= {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
SCREAMING_SNAKE_CASE__: List[Any]= {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
SCREAMING_SNAKE_CASE__: Tuple= {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
SCREAMING_SNAKE_CASE__: Dict= original_labelaid
self.assertEqual(lowerCAmelCase , zero_shot_classifier.entailment_id )
@require_torch
def test_truncation(self):
    """Regression test: very long inputs must be truncated, not crash.

    There was a regression in 4.10 for this; see
    https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
    """
    zero_shot_classifier = pipeline(
        "zero-shot-classification",
        model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
        framework="pt",
    )
    # No assertion needed: the call itself raised before the fix.
    zero_shot_classifier(
        "Who are you voting for in 2020?" * 100,
        candidate_labels=["politics", "public health", "science"],
    )
@require_torch
def test_small_model_pt(self):
    """Smoke-test the PyTorch backend with a tiny model (uniform scores expected)."""
    zero_shot_classifier = pipeline(
        "zero-shot-classification",
        model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
        framework="pt",
    )
    outputs = zero_shot_classifier(
        "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
    )
    # Tiny random model -> near-uniform scores; nested_simplify rounds floats.
    self.assertEqual(
        nested_simplify(outputs),
        {
            "sequence": "Who are you voting for in 2020?",
            "labels": ["science", "public health", "politics"],
            "scores": [0.333, 0.333, 0.333],
        },
    )
@require_tf
def test_small_model_tf(self):
    """Smoke-test the TensorFlow backend with a tiny model (uniform scores expected)."""
    zero_shot_classifier = pipeline(
        "zero-shot-classification",
        model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
        framework="tf",
    )
    outputs = zero_shot_classifier(
        "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
    )
    # Tiny random model -> near-uniform scores; nested_simplify rounds floats.
    self.assertEqual(
        nested_simplify(outputs),
        {
            "sequence": "Who are you voting for in 2020?",
            "labels": ["science", "public health", "politics"],
            "scores": [0.333, 0.333, 0.333],
        },
    )
@slow
@require_torch
def test_large_model_pt(self):
    """Full-size MNLI model, PyTorch: single-label and multi-label classification."""
    zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
    outputs = zero_shot_classifier(
        "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            "sequence": "Who are you voting for in 2020?",
            "labels": ["politics", "public health", "science"],
            "scores": [0.976, 0.015, 0.009],
        },
    )
    # Long paragraph with multi_label=True: scores are independent per label.
    outputs = zero_shot_classifier(
        "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
        " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
        " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
        " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
        " machine translation tasks show these models to be superior in quality while being more parallelizable"
        " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
        " English-to-German translation task, improving over the existing best results, including ensembles by"
        " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
        " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
        " fraction of the training costs of the best models from the literature. We show that the Transformer"
        " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
        " large and limited training data.",
        candidate_labels=["machine learning", "statistics", "translation", "vision"],
        multi_label=True,
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            "sequence": (
                "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                " networks in an encoder-decoder configuration. The best performing models also connect the"
                " encoder and decoder through an attention mechanism. We propose a new simple network"
                " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                " superior in quality while being more parallelizable and requiring significantly less time to"
                " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                " costs of the best models from the literature. We show that the Transformer generalizes well to"
                " other tasks by applying it successfully to English constituency parsing both with large and"
                " limited training data."
            ),
            "labels": ["translation", "machine learning", "vision", "statistics"],
            "scores": [0.817, 0.713, 0.018, 0.018],
        },
    )
@slow
@require_tf
def test_large_model_tf(self):
    """Full-size MNLI model, TensorFlow: single-label and multi-label classification."""
    zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
    outputs = zero_shot_classifier(
        "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            "sequence": "Who are you voting for in 2020?",
            "labels": ["politics", "public health", "science"],
            "scores": [0.976, 0.015, 0.009],
        },
    )
    # Long paragraph with multi_label=True: scores are independent per label.
    outputs = zero_shot_classifier(
        "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
        " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
        " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
        " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
        " machine translation tasks show these models to be superior in quality while being more parallelizable"
        " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
        " English-to-German translation task, improving over the existing best results, including ensembles by"
        " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
        " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
        " fraction of the training costs of the best models from the literature. We show that the Transformer"
        " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
        " large and limited training data.",
        candidate_labels=["machine learning", "statistics", "translation", "vision"],
        multi_label=True,
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            "sequence": (
                "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                " networks in an encoder-decoder configuration. The best performing models also connect the"
                " encoder and decoder through an attention mechanism. We propose a new simple network"
                " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                " superior in quality while being more parallelizable and requiring significantly less time to"
                " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                " costs of the best models from the literature. We show that the Transformer generalizes well to"
                " other tasks by applying it successfully to English constituency parsing both with large and"
                " limited training data."
            ),
            "labels": ["translation", "machine learning", "vision", "statistics"],
            "scores": [0.817, 0.713, 0.018, 0.018],
        },
    )
| 64 |
"""Project Euler 44: find the smallest difference D = |Pj - Pk| such that the
pentagonal numbers Pj and Pk have both their sum and difference pentagonal."""


def is_pentagonal(n: int) -> bool:
    """Return True if ``n`` is a pentagonal number.

    Inverts P(k) = k(3k-1)/2: n is pentagonal iff (1 + sqrt(1+24n)) / 6 is an
    integer (checked via the fractional part of the quotient).
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Return the first pentagonal difference D found among the first ``limit``
    pentagonal numbers whose pair also has a pentagonal sum, or -1 if none.

    :param limit: how many pentagonal numbers to generate and search over.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config.json URL.
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    """Configuration for a RoCBert model.

    Holds the usual BERT-style hyper-parameters plus RoCBert-specific options
    for the pronunciation and shape auxiliary embeddings.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        # Standard transformer encoder hyper-parameters.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific auxiliary embedding switches and sizes.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        # Whether the auxiliary embeddings are concatenated to the word embedding.
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 713 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Find a root of ``function`` (a sympy expression string) via Newton-Raphson.

    :param function: expression in ``variable`` whose root is sought.
    :param starting_point: initial guess (may be complex).
    :param variable: symbol name used in ``function``.
    :param precision: stop when consecutive guesses differ by less than this.
    :param multiplicity: root multiplicity; scales the Newton step for
        repeated roots, which otherwise converge only linearly.
    :raises ZeroDivisionError: if the derivative vanishes at a guess.
    """
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            # Newton step: x_{n+1} = x_n - m * f(x_n) / f'(x_n)
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Demo section: run a few classic root-finding problems when executed as a script.
if __name__ == "__main__":
# Root of sin(x) = 0 near x=2: approximates pi
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Root of x**4 - 5 = 0 from a complex start: a complex fourth root of 5
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}')
# Root of log(y) - 1 = 0 is e; note the custom variable name "y"
print(
"""The root of log(y) - 1 = 0 is """,
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Root of exp(x) - 1 = 0 with a looser precision than the default
print(
"""The root of exp(x) - 1 = 0 is""",
f'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Root of cos(x) = 0 is pi/2
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 618 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger for the CTRL tokenizer module.
logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

# Canonical checkpoint name -> hosted vocab/merges URLs.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

# Maximum model input length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# CTRL control codes: prompt prefix -> token id (see the Salesforce CTRL paper).
CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word``.

    :param word: a sequence of symbols (tuple of variable-length strings).
    :return: set of (prev_symbol, next_symbol) tuples.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """CTRL tokenizer: lower-level BPE with ``@@``-suffixed continuation pieces."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        """Load the vocabulary (json) and BPE merge ranks (text) from disk."""
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is a version header, last entry is an empty trailer.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        # Lower rank = earlier (higher priority) merge.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base + added tokens) as token -> id."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single word and return the BPE string.

        Pieces are joined with "@@ " (continuation marker); results are cached.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the final character with the end-of-word suffix.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the highest-priority (lowest-rank) bigram present.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        # Strip the trailing "</w>" end-of-word marker.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` into whitespace-delimited words, then BPE each word."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk token."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and remove the "@@ " continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into ``save_directory``.

        :return: paths of the two files written (or None on bad directory).
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
| 87 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formatter that converts Arrow data into PyTorch tensors."""

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # Extra kwargs forwarded to every torch.tensor(...) call (e.g. device).
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype tensors into one tensor."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert a scalar/array value into a torch tensor (or pass through)."""
        import torch

        # Strings/bytes/None cannot become tensors; return unchanged.
        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        # Default integer/float dtypes mirror torch's defaults for new tensors.
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # User-supplied kwargs win over the inferred default dtype.
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Tensorize every leaf of a (possibly nested) structure."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: "pa.Table") -> Mapping:
        """Extract and tensorize a single row from an Arrow table."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: "pa.Table") -> "torch.Tensor":
        """Extract and tensorize the first column of an Arrow table."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: "pa.Table") -> Mapping:
        """Extract and tensorize a whole batch, consolidating each column."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 94 | 0 |
"""simple docstring"""
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    """Minimal iterable dataset that simply yields the elements it is given."""

    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    """Create an Accelerator and assert this script runs on exactly 2 processes.

    :param even_batches: forwarded to ``Accelerator(even_batches=...)``.
    """
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator, dataset_size, batch_size, iterable=False):
    """Build a DataLoader over range(dataset_size) and prepare it.

    :param accelerator: Accelerator whose ``prepare`` wraps the dataloader.
    :param iterable: use an IterableDataset instead of a map-style dataset.
    """
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator,
    dataset_size,
    batch_size,
    process_0_expected_batch_sizes,
    process_1_expected_batch_sizes,
):
    """Assert that each process observes the expected per-batch sizes."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    """With even_batches=True (default) all processes see identical batch counts/sizes."""
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )
def test_can_disable_even_batches():
    """With even_batches=False, trailing batches may differ across processes."""
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )
def test_can_join_uneven_inputs():
    """join_uneven_inputs lets DDP processes finish uneven iteration counts."""
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    # Process 0 gets 2 of the 3 samples; process 1 gets 1.
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    """Joining outside multi-GPU DDP should warn rather than fail."""
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    """even_batches can be overridden inside the join context and is restored after."""
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    # Outside the context the original setting must be restored.
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    """Overriding even_batches must tolerate iterable dataloaders in the mix."""
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    # Iterable dataloader has no batch_sampler; it must simply be skipped.
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    """Overriding even_batches for an iterable dataset should warn (unsupported)."""
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    """Run every even_batches / join_uneven_inputs scenario in sequence."""
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    # Temporarily fake a non-DDP distributed type, then restore it.
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
| 708 |
"""Shared test mixin for Flax generation utilities."""
import random
import unittest

import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax


if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit
    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    # Cap per-process XLA GPU memory so several test processes can share a GPU.
    # NOTE(review): restored from the "0.12" constant + comment; confirm the env var.
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int32 array of token ids with the given shape.

    :param shape: tuple of dimensions for the output array.
    :param vocab_size: ids are drawn uniformly from [0, vocab_size - 1].
    :param rng: optional ``random.Random`` for reproducibility.
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    """Create a random 0/1 attention mask of the given shape.

    The last position of every row is forced to 1 so that at least one token
    is attended to for each batch element.
    """
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class __UpperCAmelCase :
'''simple docstring'''
lowercase : Union[str, Any] = None
lowercase : str = ()
def _get_input_ids_and_config(self):
    """Build a small (config, input_ids, attention_mask, max_length) fixture.

    Halves the tester's sequence length, caps batch size at 2, and allows
    generating 5 extra tokens.
    """
    config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

    # cut to half length & take max batch_size 3
    max_batch_size = 2
    sequence_length = inputs["input_ids"].shape[-1] // 2
    input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
    attention_mask = jnp.ones_like(input_ids)
    attention_mask = attention_mask[:max_batch_size, :sequence_length]

    # generate max 5 tokens
    max_length = input_ids.shape[-1] + 5
    if config.eos_token_id is not None and config.pad_token_id is None:
        # hack to allow generate for models such as GPT2 as is done in `generate()`
        config.pad_token_id = config.eos_token_id
    return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def test_greedy_generate_pt_fx(self):
    """Greedy generation must match between a Flax model and its PyTorch twin."""
    config, input_ids, _, max_length = self._get_input_ids_and_config()
    config.do_sample = False
    config.max_length = max_length
    config.decoder_start_token_id = 0

    for model_class in self.all_generative_model_classes:
        flax_model = model_class(config)

        pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
        pt_model_class = getattr(transformers, pt_model_class_name)
        pt_model = pt_model_class(config).eval()
        # Share weights: load the Flax params into the PyTorch model.
        pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

        flax_generation_outputs = flax_model.generate(input_ids).sequences
        pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

        # PT may stop earlier (e.g. on EOS); compare up to the shorter length.
        if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
            flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

        self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =max_length
for model_class in self.all_generative_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] , _A )
_SCREAMING_SNAKE_CASE =jit(model.generate )
_SCREAMING_SNAKE_CASE =jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =max_length
for model_class in self.all_generative_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] , _A )
_SCREAMING_SNAKE_CASE =jit(model.generate )
_SCREAMING_SNAKE_CASE =jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =max_length
_SCREAMING_SNAKE_CASE =2
for model_class in self.all_generative_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] , _A )
_SCREAMING_SNAKE_CASE =jit(model.generate )
_SCREAMING_SNAKE_CASE =jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =max_length
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =2
for model_class in self.all_generative_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =max_length
_SCREAMING_SNAKE_CASE =0.8
_SCREAMING_SNAKE_CASE =1_0
_SCREAMING_SNAKE_CASE =0.3
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =8
_SCREAMING_SNAKE_CASE =9
for model_class in self.all_generative_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] , _A )
_SCREAMING_SNAKE_CASE =jit(model.generate )
_SCREAMING_SNAKE_CASE =jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
_SCREAMING_SNAKE_CASE =max_length
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =8
_SCREAMING_SNAKE_CASE =9
for model_class in self.all_generative_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] , _A )
_SCREAMING_SNAKE_CASE =jit(model.generate )
_SCREAMING_SNAKE_CASE =jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
_SCREAMING_SNAKE_CASE =max_length
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =8
_SCREAMING_SNAKE_CASE =9
for model_class in self.all_generative_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] , _A )
_SCREAMING_SNAKE_CASE =jit(model.generate )
_SCREAMING_SNAKE_CASE =jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
# pad attention mask on the left
_SCREAMING_SNAKE_CASE =attention_mask.at[(0, 0)].set(0 )
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =max_length
for model_class in self.all_generative_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model.generate(_A , attention_mask=_A ).sequences
self.assertEqual(generation_outputs.shape[-1] , _A )
_SCREAMING_SNAKE_CASE =jit(model.generate )
_SCREAMING_SNAKE_CASE =jit_generate(_A , attention_mask=_A ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
# pad attention mask on the left
_SCREAMING_SNAKE_CASE =attention_mask.at[(0, 0)].set(0 )
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =max_length
for model_class in self.all_generative_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model.generate(_A , attention_mask=_A ).sequences
self.assertEqual(generation_outputs.shape[-1] , _A )
_SCREAMING_SNAKE_CASE =jit(model.generate )
_SCREAMING_SNAKE_CASE =jit_generate(_A , attention_mask=_A ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
# pad attention mask on the left
_SCREAMING_SNAKE_CASE =attention_mask.at[(0, 0)].set(0 )
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =max_length
for model_class in self.all_generative_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model.generate(_A , attention_mask=_A ).sequences
self.assertEqual(generation_outputs.shape[-1] , _A )
_SCREAMING_SNAKE_CASE =jit(model.generate )
_SCREAMING_SNAKE_CASE =jit_generate(_A , attention_mask=_A ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
    '''Regression tests for ``generate()`` keyword-argument validation.

    NOTE(review): garbling artifacts — ``unittest`` is not imported in this
    chunk, ``_A`` (used both as the expected exception type and as a kwarg
    value below) is undefined, and this class reuses the name
    ``__UpperCAmelCase`` from the mixin above, shadowing it.
    '''
    def UpperCamelCase_ ( self ):
        '''``generate()`` must reject a typo'd kwarg (``do_samples``) and an
        arbitrary unknown kwarg (``foo``) with errors naming the argument.'''
        _SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
        _SCREAMING_SNAKE_CASE =FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        _SCREAMING_SNAKE_CASE ='''Hello world'''
        _SCREAMING_SNAKE_CASE =tokenizer(_A , return_tensors='''np''' ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(_A , '''do_samples''' ):
            model.generate(_A , do_samples=_A )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(_A , '''foo''' ):
            _SCREAMING_SNAKE_CASE ={'''foo''': '''bar'''}
            model.generate(_A , **_A )
| 165 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class _UpperCamelCase ( _lowercase ):
    """Configuration class for the Open-Llama model (``model_type = "open-llama"``).

    NOTE(review): garbling artifacts — the base class ``_lowercase`` is not
    defined in this chunk (presumably a ``PretrainedConfig`` alias), every
    ``__init__`` parameter is named ``a`` (duplicate parameter names are a
    SyntaxError), and ``__A`` is referenced without being defined.  The
    intended parameter names are recoverable from the right-hand sides of the
    attribute assignments below.  ``use_memorry_efficient_attention`` (typo
    included) is popped from kwargs as written.
    """
    # HF model-type identifier (attribute name garbled; presumably model_type).
    lowerCamelCase__ ='''open-llama'''
    def __init__( self : Union[str, Any] , a : Dict=10_0000 , a : Optional[int]=4096 , a : str=1_1008 , a : str=32 , a : Union[str, Any]=32 , a : str="silu" , a : Tuple=2048 , a : Optional[Any]=0.02 , a : List[Any]=1e-6 , a : Dict=True , a : Optional[Any]=0 , a : str=1 , a : Any=2 , a : Optional[int]=False , a : List[Any]=True , a : Any=0.1 , a : int=0.1 , a : Optional[int]=True , a : Optional[int]=True , a : Optional[Any]=None , **a : str , ) -> Union[str, Any]:
        """Store the model hyperparameters, validate ``rope_scaling``, then
        delegate token-id and weight-tying kwargs to the base config."""
        SCREAMING_SNAKE_CASE : List[str] = vocab_size
        SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
        SCREAMING_SNAKE_CASE : Dict = hidden_size
        SCREAMING_SNAKE_CASE : str = intermediate_size
        SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
        SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
        SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
        SCREAMING_SNAKE_CASE : List[Any] = initializer_range
        SCREAMING_SNAKE_CASE : Tuple = rms_norm_eps
        SCREAMING_SNAKE_CASE : str = use_cache
        # Legacy kwarg (note the upstream "memorry" typo) is popped so it does
        # not leak into the base-class kwargs.
        SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop(
            "use_memorry_efficient_attention" , __A )
        SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout_prob
        SCREAMING_SNAKE_CASE : str = use_stable_embedding
        SCREAMING_SNAKE_CASE : Dict = shared_input_output_embedding
        SCREAMING_SNAKE_CASE : List[str] = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , tie_word_embeddings=__A , **__A , )
    def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
        """Validate ``self.rope_scaling``: must be ``None``, or a 2-field dict
        whose ``type`` is one of {"linear", "dynamic"} and whose ``factor`` is
        a float strictly greater than 1.  Raises ``ValueError`` otherwise."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , __A ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                F"got {self.rope_scaling}" )
        SCREAMING_SNAKE_CASE : Dict = self.rope_scaling.get("type" , __A )
        SCREAMING_SNAKE_CASE : Optional[Any] = self.rope_scaling.get("factor" , __A )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        # NOTE(review): the trailing "| 25 |" on the line below is table
        # residue fused onto the source line by the extraction that produced
        # this file; it is not valid Python.
        if rope_scaling_factor is None or not isinstance(__A , __A ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" ) | 25 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( _lowercase ):
    """Configuration class for the UniSpeech model (``model_type = "unispeech"``).

    NOTE(review): garbling artifacts — the base class ``_lowercase`` is not
    defined in this chunk (presumably a ``PretrainedConfig`` alias), every
    ``__init__`` parameter is named ``__A`` (duplicate parameter names are a
    SyntaxError), and the left-hand targets collapsed to ``__A`` where the
    intended targets were ``self.<name>`` assignments.  The intended
    parameter names are recoverable from the right-hand sides below.
    """
    # HF model-type identifier (attribute name garbled).
    _lowercase : Dict = '''unispeech'''
    def __init__( self : str , __A : Tuple=32 , __A : List[str]=768 , __A : Dict=12 , __A : Union[str, Any]=12 , __A : Tuple=3072 , __A : Any="gelu" , __A : int=0.1 , __A : Optional[int]=0.1 , __A : List[Any]=0.1 , __A : Any=0.0 , __A : List[str]=0.0 , __A : int=0.1 , __A : List[Any]=0.1 , __A : List[str]=0.0_2 , __A : List[str]=1e-5 , __A : List[Any]="group" , __A : int="gelu" , __A : Any=(512, 512, 512, 512, 512, 512, 512) , __A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __A : Tuple=(10, 3, 3, 3, 3, 2, 2) , __A : Optional[int]=False , __A : Any=128 , __A : Union[str, Any]=16 , __A : Optional[Any]=False , __A : str=True , __A : Dict=0.0_5 , __A : Optional[Any]=10 , __A : Dict=2 , __A : int=0.0 , __A : List[str]=10 , __A : str=0 , __A : List[str]=320 , __A : List[Any]=2 , __A : Tuple=0.1 , __A : Optional[int]=100 , __A : Any=256 , __A : Dict=256 , __A : Tuple=0.1 , __A : List[str]="mean" , __A : int=False , __A : List[str]=False , __A : List[Any]=256 , __A : str=80 , __A : Tuple=0 , __A : Tuple=1 , __A : int=2 , __A : Dict=0.5 , **__A : List[Any] , ):
        """Store encoder, feature-extractor, SpecAugment, quantizer and CTC
        hyperparameters; validates that the three conv layer lists
        (dim/stride/kernel) have equal lengths."""
        super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
        __A : Dict = hidden_size
        __A : Optional[Any] = feat_extract_norm
        __A : List[Any] = feat_extract_activation
        __A : str = list(__A )
        __A : Optional[Any] = list(__A )
        __A : Optional[int] = list(__A )
        __A : List[Any] = conv_bias
        __A : Optional[int] = num_conv_pos_embeddings
        __A : List[Any] = num_conv_pos_embedding_groups
        __A : int = len(self.conv_dim )
        __A : Optional[Any] = num_hidden_layers
        __A : List[str] = intermediate_size
        __A : Union[str, Any] = hidden_act
        __A : Optional[int] = num_attention_heads
        __A : Tuple = hidden_dropout
        __A : Optional[Any] = attention_dropout
        __A : Union[str, Any] = activation_dropout
        __A : Dict = feat_proj_dropout
        __A : Optional[int] = final_dropout
        __A : Dict = layerdrop
        __A : Optional[int] = layer_norm_eps
        __A : Optional[Any] = initializer_range
        __A : Optional[int] = num_ctc_classes
        __A : Dict = vocab_size
        __A : List[str] = do_stable_layer_norm
        __A : Tuple = use_weighted_layer_sum
        __A : Any = classifier_proj_size
        # The conv feature extractor is described by three parallel lists;
        # they must all have the same length or layer construction would fail.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __A : Tuple = apply_spec_augment
        __A : Union[str, Any] = mask_time_prob
        __A : Optional[Any] = mask_time_length
        __A : List[Any] = mask_time_min_masks
        __A : List[Any] = mask_feature_prob
        __A : Any = mask_feature_length
        __A : List[Any] = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        __A : Any = num_codevectors_per_group
        __A : Tuple = num_codevector_groups
        __A : List[str] = contrastive_logits_temperature
        __A : Optional[int] = feat_quantizer_dropout
        __A : int = num_negatives
        __A : List[str] = codevector_dim
        __A : int = proj_codevector_dim
        __A : Union[str, Any] = diversity_loss_weight
        # ctc loss
        __A : List[str] = ctc_loss_reduction
        __A : Any = ctc_zero_infinity
        # pretraining loss
        __A : Union[str, Any] = replace_prob
    @property
    def lowerCAmelCase_ ( self : int ):
        """Total stride of the convolutional feature extractor — the product
        of all entries in ``self.conv_stride``."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 17 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( A_ ,unittest.TestCase ):
    '''Fast (CPU) tests for ``ConsistencyModelPipeline`` using tiny test UNets:
    multistep and onestep sampling, unconditional and class-conditional, each
    checked against a hard-coded expected image slice.

    NOTE(review): garbling artifacts — the base class ``A_`` is not defined in
    this chunk (presumably a pipeline tester mixin), all helper/test methods
    share the name ``UpperCAmelCase`` (later definitions shadow earlier ones),
    every local binds to ``lowercase__``, ``get_dummy_inputs`` declares two
    parameters both named ``_snake_case`` (a SyntaxError), and names such as
    ``unet``/``class_cond``/``components``/``inputs``/``pipe``/``image`` are
    read without ever being bound.  Docstrings describe apparent intent only.
    '''
    # Pipeline-tester configuration: pipeline class and parameter sets
    # (attribute names garbled; all collapsed to ``lowerCAmelCase``).
    lowerCAmelCase : List[str] = ConsistencyModelPipeline
    lowerCAmelCase : List[str] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    lowerCAmelCase : int = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    lowerCAmelCase : List[str] = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ] )
    @property
    def UpperCAmelCase ( self : Any ) -> Tuple:
        '''Tiny unconditional test UNet loaded from the hub.'''
        lowercase__ : int = UNetaDModel.from_pretrained(
            '''diffusers/consistency-models-test''' ,subfolder='''test_unet''' ,)
        return unet
    @property
    def UpperCAmelCase ( self : Dict ) -> Tuple:
        '''Tiny class-conditional test UNet loaded from the hub.'''
        lowercase__ : Any = UNetaDModel.from_pretrained(
            '''diffusers/consistency-models-test''' ,subfolder='''test_unet_class_cond''' ,)
        return unet
    def UpperCAmelCase ( self : Any ,_snake_case : Optional[Any]=False ) -> int:
        '''Assemble pipeline components (unet + CM multistep scheduler); picks
        the class-conditional UNet when the flag is set.'''
        if class_cond:
            lowercase__ : Any = self.dummy_cond_unet
        else:
            lowercase__ : int = self.dummy_uncond_unet
        # Default to CM multistep sampler
        lowercase__ : List[str] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
        lowercase__ : List[Any] = {
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components
    def UpperCAmelCase ( self : Any ,_snake_case : Tuple ,_snake_case : Optional[int]=0 ) -> Union[str, Any]:
        '''Build call kwargs with a seeded generator (plain ``torch.manual_seed``
        on mps, a device-bound ``torch.Generator`` elsewhere).'''
        if str(_snake_case ).startswith('''mps''' ):
            lowercase__ : Dict = torch.manual_seed(_snake_case )
        else:
            lowercase__ : List[Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
        lowercase__ : Optional[int] = {
            '''batch_size''': 1,
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''generator''': generator,
            '''output_type''': '''np''',
        }
        return inputs
    def UpperCAmelCase ( self : Optional[int] ) -> str:
        '''Multistep unconditional sampling: 32x32x3 output whose corner slice
        matches the hard-coded expectation within 1e-3.'''
        lowercase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        lowercase__ : List[Any] = self.get_dummy_components()
        lowercase__ : int = ConsistencyModelPipeline(**_snake_case )
        lowercase__ : List[str] = pipe.to(_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )
        lowercase__ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
        lowercase__ : Union[str, Any] = pipe(**_snake_case ).images
        assert image.shape == (1, 32, 32, 3)
        lowercase__ : List[Any] = image[0, -3:, -3:, -1]
        lowercase__ : Union[str, Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def UpperCAmelCase ( self : int ) -> List[Any]:
        '''Multistep class-conditional sampling (class label 0): same shape and
        slice expectations as the unconditional case.'''
        lowercase__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        lowercase__ : Optional[Any] = self.get_dummy_components(class_cond=_snake_case )
        lowercase__ : Optional[Any] = ConsistencyModelPipeline(**_snake_case )
        lowercase__ : Any = pipe.to(_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )
        lowercase__ : Optional[Any] = self.get_dummy_inputs(_snake_case )
        lowercase__ : Any = 0
        lowercase__ : int = pipe(**_snake_case ).images
        assert image.shape == (1, 32, 32, 3)
        lowercase__ : Optional[Any] = image[0, -3:, -3:, -1]
        lowercase__ : Any = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        '''Onestep unconditional sampling (num_inference_steps=1, timesteps
        cleared): distinct expected slice.'''
        lowercase__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        lowercase__ : List[Any] = self.get_dummy_components()
        lowercase__ : List[str] = ConsistencyModelPipeline(**_snake_case )
        lowercase__ : int = pipe.to(_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )
        lowercase__ : List[str] = self.get_dummy_inputs(_snake_case )
        lowercase__ : Optional[Any] = 1
        lowercase__ : List[Any] = None
        lowercase__ : Union[str, Any] = pipe(**_snake_case ).images
        assert image.shape == (1, 32, 32, 3)
        lowercase__ : Dict = image[0, -3:, -3:, -1]
        lowercase__ : Optional[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def UpperCAmelCase ( self : Any ) -> Optional[int]:
        '''Onestep class-conditional sampling (class label 0).'''
        lowercase__ : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        lowercase__ : List[Any] = self.get_dummy_components(class_cond=_snake_case )
        lowercase__ : Union[str, Any] = ConsistencyModelPipeline(**_snake_case )
        lowercase__ : Any = pipe.to(_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )
        lowercase__ : Dict = self.get_dummy_inputs(_snake_case )
        lowercase__ : Tuple = 1
        lowercase__ : Optional[Any] = None
        lowercase__ : Optional[Any] = 0
        lowercase__ : List[str] = pipe(**_snake_case ).images
        assert image.shape == (1, 32, 32, 3)
        lowercase__ : Tuple = image[0, -3:, -3:, -1]
        lowercase__ : List[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    '''Slow GPU tests for ``ConsistencyModelPipeline`` with the full
    ``diffusers_cd_imagenet64_l2`` checkpoint: multistep and onestep sampling,
    plus fp16 variants gated on torch 2.0 flash attention.

    NOTE(review): same garbling as the fast-test class above — all methods
    named ``UpperCAmelCase`` (shadowing), locals collapsed to ``lowercase__``,
    ``get_inputs``/``get_fixed_latents`` declare duplicate ``_snake_case``
    parameters (SyntaxError), and names like ``generator``/``inputs``/
    ``latents``/``pipe``/``image`` are read without being bound.  This class
    also shadows the fast-test class's name ``__A``.
    '''
    def UpperCAmelCase ( self : Dict ) -> Tuple:
        '''Free GPU memory between tests (gc + CUDA cache flush).'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCAmelCase ( self : List[Any] ,_snake_case : Union[str, Any]=0 ,_snake_case : Optional[Any]=False ,_snake_case : str="cpu" ,_snake_case : List[Any]=torch.floataa ,_snake_case : str=(1, 3, 64, 64) ) -> Any:
        '''Build call kwargs (class label 0, timesteps [22, 0]); optionally
        pins fixed latents for reproducible fp16 comparisons.'''
        lowercase__ : Tuple = torch.manual_seed(_snake_case )
        lowercase__ : List[Any] = {
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''class_labels''': 0,
            '''generator''': generator,
            '''output_type''': '''np''',
        }
        if get_fixed_latents:
            lowercase__ : Any = self.get_fixed_latents(seed=_snake_case ,device=_snake_case ,dtype=_snake_case ,shape=_snake_case )
            lowercase__ : List[str] = latents
        return inputs
    def UpperCAmelCase ( self : Tuple ,_snake_case : Any=0 ,_snake_case : Any="cpu" ,_snake_case : Dict=torch.floataa ,_snake_case : Optional[Any]=(1, 3, 64, 64) ) -> Tuple:
        '''Deterministic latents from a device-bound seeded generator.'''
        if type(_snake_case ) == str:
            lowercase__ : Union[str, Any] = torch.device(_snake_case )
        lowercase__ : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
        lowercase__ : Dict = randn_tensor(_snake_case ,generator=_snake_case ,device=_snake_case ,dtype=_snake_case )
        return latents
    def UpperCAmelCase ( self : Any ) -> List[Any]:
        '''Multistep sampling with the imagenet64 L2 checkpoint: 64x64x3 output
        whose corner slice matches the expectation within 2e-2.'''
        lowercase__ : List[str] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
        lowercase__ : Tuple = CMStochasticIterativeScheduler(
            num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
        lowercase__ : Optional[Any] = ConsistencyModelPipeline(unet=_snake_case ,scheduler=_snake_case )
        pipe.to(torch_device=_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )
        lowercase__ : Any = self.get_inputs()
        lowercase__ : List[Any] = pipe(**_snake_case ).images
        assert image.shape == (1, 64, 64, 3)
        lowercase__ : List[Any] = image[0, -3:, -3:, -1]
        lowercase__ : int = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
        '''Onestep sampling (num_inference_steps=1, timesteps cleared).'''
        lowercase__ : List[str] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
        lowercase__ : int = CMStochasticIterativeScheduler(
            num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
        lowercase__ : str = ConsistencyModelPipeline(unet=_snake_case ,scheduler=_snake_case )
        pipe.to(torch_device=_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )
        lowercase__ : Any = self.get_inputs()
        lowercase__ : Any = 1
        lowercase__ : Optional[int] = None
        lowercase__ : str = pipe(**_snake_case ).images
        assert image.shape == (1, 64, 64, 3)
        lowercase__ : List[Any] = image[0, -3:, -3:, -1]
        lowercase__ : Union[str, Any] = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_a
    def UpperCAmelCase ( self : List[str] ) -> int:
        '''Multistep fp16 sampling with fixed latents, forcing the torch 2.0
        flash-attention SDP kernel; tighter 1e-3 tolerance.'''
        lowercase__ : Optional[int] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
        lowercase__ : List[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
        lowercase__ : Tuple = ConsistencyModelPipeline(unet=_snake_case ,scheduler=_snake_case )
        pipe.to(torch_device=_snake_case ,torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=_snake_case )
        lowercase__ : Dict = self.get_inputs(get_fixed_latents=_snake_case ,device=_snake_case )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=_snake_case ,enable_math=_snake_case ,enable_mem_efficient=_snake_case ):
            lowercase__ : int = pipe(**_snake_case ).images
        assert image.shape == (1, 64, 64, 3)
        lowercase__ : List[Any] = image[0, -3:, -3:, -1]
        lowercase__ : Optional[Any] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_a
    def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        '''Onestep fp16 sampling with fixed latents under the flash-attention
        SDP kernel.'''
        lowercase__ : Optional[int] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
        lowercase__ : List[str] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
        lowercase__ : Union[str, Any] = ConsistencyModelPipeline(unet=_snake_case ,scheduler=_snake_case )
        pipe.to(torch_device=_snake_case ,torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=_snake_case )
        lowercase__ : Any = self.get_inputs(get_fixed_latents=_snake_case ,device=_snake_case )
        lowercase__ : Union[str, Any] = 1
        lowercase__ : Optional[Any] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=_snake_case ,enable_math=_snake_case ,enable_mem_efficient=_snake_case ):
            lowercase__ : List[Any] = pipe(**_snake_case ).images
        assert image.shape == (1, 64, 64, 3)
        lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
        lowercase__ : Optional[int] = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 719 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def __UpperCAmelCase(checkpoint_path, config_path, output_path) -> None:
    """Convert an original CompVis latent-diffusion checkpoint into a diffusers
    ``LDMPipeline`` and save it to ``output_path``.

    Fixes the garbled original: all three parameters were named
    ``__lowerCamelCase`` (duplicate parameter names are a SyntaxError) and
    every local assignment targeted ``lowercase__``, so ``config``,
    ``state_dict``, the per-module dicts and the scheduler/pipeline locals
    were never bound where later read.

    Args:
        checkpoint_path: path to the original ``torch.save`` checkpoint
            (its ``"model"`` entry holds the state dict).
        config_path: path to the OmegaConf YAML describing the model.
        output_path: directory the converted pipeline is saved into.
    """
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key):
            # Strip the prefix so keys match the standalone VQModel.
            first_stage_dict[key.replace(first_stage_key, '')] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '')] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    # NOTE(review): the original's clip_sample argument was garbled
    # (``clip_sample=__lowerCamelCase``); False matches the upstream
    # unconditional-LDM conversion script — confirm if behavior differs.
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule='scaled_linear',
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    # CLI entry point for the LDM checkpoint conversion.
    # Fixes the garbled original: the parser and parsed-args locals were both
    # assigned to ``lowerCAmelCase_`` (so ``parser``/``args`` were undefined at
    # use), and the final call targeted a nonexistent ``convert_ldm_original``
    # instead of the converter defined above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
    __UpperCAmelCase(args.checkpoint_path, args.config_path, args.output_path)
| 122 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.