Dataset columns:
repo: string (length 2 to 99)
file: string (length 13 to 225)
code: string (length 0 to 18.3M)
file_length: int64 (0 to 18.3M)
avg_line_length: float64 (0 to 1.36M)
max_line_length: int64 (0 to 4.26M)
extension_type: string (categorical, 1 distinct value)
repo: transformers
file: transformers-main/src/transformers/data/metrics/squad_metrics.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0 In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an additional na_prob.json file is provided. This file is expected to map question ID's to the model's predicted probability that a question is unanswerable. """ import collections import json import math import re import string from ...models.bert import BasicTokenizer from ...utils import logging logger = logging.get_logger(__name__) def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): regex = re.compile(r"\b(a|an|the)\b", re.UNICODE) return re.sub(regex, " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def get_tokens(s): if not s: return [] return normalize_answer(s).split() def compute_exact(a_gold, a_pred): return int(normalize_answer(a_gold) == normalize_answer(a_pred)) def compute_f1(a_gold, a_pred): gold_toks = get_tokens(a_gold) pred_toks = get_tokens(a_pred) common = collections.Counter(gold_toks) & collections.Counter(pred_toks) num_same = sum(common.values()) if len(gold_toks) == 0 or len(pred_toks) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 precision = 1.0 * num_same / len(pred_toks) recall = 1.0 * num_same / len(gold_toks) f1 = (2 * precision * recall) / (precision + recall) return f1 def get_raw_scores(examples, preds): """ Computes the exact and f1 scores from the examples and the model predictions """ exact_scores = {} f1_scores = {} for example in examples: qas_id = example.qas_id gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])] if not gold_answers: # For unanswerable questions, only correct answer is empty string gold_answers = [""] if qas_id not in preds: print(f"Missing prediction for {qas_id}") continue prediction = preds[qas_id] exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers) f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers) return exact_scores, f1_scores def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): new_scores = {} for qid, s in scores.items(): pred_na = na_probs[qid] > na_prob_thresh if pred_na: new_scores[qid] = float(not qid_to_has_ans[qid]) else: new_scores[qid] = s return new_scores def make_eval_dict(exact_scores, f1_scores, qid_list=None): if not qid_list: total = len(exact_scores) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values()) / total), 
("f1", 100.0 * sum(f1_scores.values()) / total), ("total", total), ] ) else: total = len(qid_list) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total), ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total), ("total", total), ] ) def merge_eval(main_eval, new_eval, prefix): for k in new_eval: main_eval[f"{prefix}_{k}"] = new_eval[k] def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score = cur_score best_thresh = 0.0 qid_list = sorted(na_probs, key=lambda k: na_probs[k]) for i, qid in enumerate(qid_list): if qid not in scores: continue if qid_to_has_ans[qid]: diff = scores[qid] else: if preds[qid]: diff = -1 else: diff = 0 cur_score += diff if cur_score > best_score: best_score = cur_score best_thresh = na_probs[qid] has_ans_score, has_ans_cnt = 0, 0 for qid in qid_list: if not qid_to_has_ans[qid]: continue has_ans_cnt += 1 if qid not in scores: continue has_ans_score += scores[qid] return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans) main_eval["best_exact"] = best_exact main_eval["best_exact_thresh"] = exact_thresh main_eval["best_f1"] = best_f1 main_eval["best_f1_thresh"] = f1_thresh main_eval["has_ans_exact"] = has_ans_exact main_eval["has_ans_f1"] = has_ans_f1 def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score = cur_score best_thresh = 0.0 qid_list = sorted(na_probs, key=lambda k: na_probs[k]) for _, qid in enumerate(qid_list): if qid not in scores: continue if qid_to_has_ans[qid]: diff = scores[qid] else: if preds[qid]: diff = -1 else: diff = 0 cur_score += diff if cur_score > best_score: best_score = cur_score best_thresh = na_probs[qid] return 100.0 * best_score / len(scores), best_thresh def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) main_eval["best_exact"] = best_exact main_eval["best_exact_thresh"] = exact_thresh main_eval["best_f1"] = best_f1 main_eval["best_f1_thresh"] = f1_thresh def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0): qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples} has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer] no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer] if no_answer_probs is None: no_answer_probs = {k: 0.0 for k in preds} exact, f1 = get_raw_scores(examples, preds) exact_threshold = apply_no_ans_threshold( exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold ) f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold) evaluation = make_eval_dict(exact_threshold, f1_threshold) if has_answer_qids: has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids) 
merge_eval(evaluation, has_ans_eval, "HasAns") if no_answer_qids: no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids) merge_eval(evaluation, no_ans_eval, "NoAns") if no_answer_probs: find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer) return evaluation def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. # # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heuristic between # `pred_text` and `orig_text` to get a character-to-character alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for i, c in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info(f"Unable to find text: '{pred_text}' in '{orig_text}'") return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info(f"Length not equal after stripping spaces: '{orig_ns_text}' vs '{tok_ns_text}'") return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. 
tok_s_to_ns_map = {} for i, tok_index in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position : (orig_end_position + 1)] return output_text def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs def compute_predictions_logits( all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold, tokenizer, ): """Write final predictions to the json file and log-odds of null if needed.""" if output_prediction_file: logger.info(f"Writing predictions to: {output_prediction_file}") if output_nbest_file: logger.info(f"Writing nbest to: {output_nbest_file}") if output_null_log_odds_file and version_2_with_negative: logger.info(f"Writing null_log_odds to: {output_null_log_odds_file}") example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"] ) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for example_index, example in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min null score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for feature_index, feature in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if version_2_with_negative: feature_null_score = 
result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index], ) ) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit, ) ) prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"] ) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)] tok_text = tokenizer.convert_tokens_to_string(tok_tokens) # tok_text = " ".join(tok_tokens) # # # De-tokenize WordPieces that have been split off. # tok_text = tok_text.replace(" ##", "") # tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't include the empty option in the n-best, include it if version_2_with_negative: if "" not in seen_predictions: nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could only have single null prediction. # So we just create a nonce prediction in this case to avoid failure. if len(nbest) == 1: nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) if len(nbest) < 1: raise ValueError("No valid predictions") total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for i, entry in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) if len(nbest_json) < 1: raise ValueError("No valid predictions") if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json if output_prediction_file: with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") if output_nbest_file: with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if output_null_log_odds_file and version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions def compute_predictions_log_probs( all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, start_n_top, end_n_top, version_2_with_negative, tokenizer, verbose_logging, ): """ XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of null if needed. 
Requires utils_squad_evaluate.py """ _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"] ) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_log_prob", "end_log_prob"] ) logger.info(f"Writing predictions to: {output_prediction_file}") example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for example_index, example in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive for feature_index, feature in enumerate(features): result = unique_id_to_result[feature.unique_id] cur_null_score = result.cls_logits # if we could have irrelevant answers, get the min score of irrelevant score_null = min(score_null, cur_null_score) for i in range(start_n_top): for j in range(end_n_top): start_log_prob = result.start_logits[i] start_index = result.start_top_index[i] j_index = i * end_n_top + j end_log_prob = result.end_logits[j_index] end_index = result.end_top_index[j_index] # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= feature.paragraph_len - 1: continue if end_index >= feature.paragraph_len - 1: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_log_prob=start_log_prob, end_log_prob=end_log_prob, ) ) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True ) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] # XLNet un-tokenizer # Let's keep it simple for now and see if we need all this later. 
# # tok_start_to_orig_index = feature.tok_start_to_orig_index # tok_end_to_orig_index = feature.tok_end_to_orig_index # start_orig_pos = tok_start_to_orig_index[pred.start_index] # end_orig_pos = tok_end_to_orig_index[pred.end_index] # paragraph_text = example.paragraph_text # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() # Previously used Bert untokenizer tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)] tok_text = tokenizer.convert_tokens_to_string(tok_tokens) # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) if hasattr(tokenizer, "do_lower_case"): do_lower_case = tokenizer.do_lower_case else: do_lower_case = tokenizer.do_lowercase_and_remove_accent final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True nbest.append( _NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob) ) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. if not nbest: nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6)) total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_log_prob + entry.end_log_prob) if not best_non_null_entry: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for i, entry in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_log_prob"] = entry.start_log_prob output["end_log_prob"] = entry.end_log_prob nbest_json.append(output) if len(nbest_json) < 1: raise ValueError("No valid predictions") if best_non_null_entry is None: raise ValueError("No valid predictions") score_diff = score_null scores_diff_json[example.qas_id] = score_diff # note(zhiliny): always predict best_non_null_entry # and the evaluation script will search for the best threshold all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions
file_length: 29,698 | avg_line_length: 37.026889 | max_line_length: 117 | extension_type: py
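A minimal usage sketch (not part of the file above) of the scoring helpers defined in squad_metrics.py; the values in the comments follow directly from normalize_answer and the token-overlap F1 formula shown in the source.

from transformers.data.metrics.squad_metrics import compute_exact, compute_f1

# normalize_answer lower-cases, strips punctuation and articles, and collapses
# whitespace, so "The cat" and "the cat" both normalize to "cat".
print(compute_exact("The cat", "the cat"))       # 1
# Token-level F1: 2 shared tokens, precision 2/2, recall 2/3 -> F1 = 0.8
print(compute_f1("new york city", "york city"))  # 0.8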
repo: transformers
file: transformers-main/src/transformers/data/metrics/__init__.py
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import f1_score, matthews_corrcoef DEPRECATION_WARNING = ( "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate " "library. You can have a look at this example script for pointers: " "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" ) def simple_accuracy(preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(simple_accuracy, "sklearn") return (preds == labels).mean() def acc_and_f1(preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(acc_and_f1, "sklearn") acc = simple_accuracy(preds, labels) f1 = f1_score(y_true=labels, y_pred=preds) return { "acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2, } def pearson_and_spearman(preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(pearson_and_spearman, "sklearn") pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def glue_compute_metrics(task_name, preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(glue_compute_metrics, "sklearn") assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}" if task_name == "cola": return {"mcc": matthews_corrcoef(labels, preds)} elif task_name == "sst-2": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mrpc": return acc_and_f1(preds, labels) elif task_name == "sts-b": return pearson_and_spearman(preds, labels) elif task_name == "qqp": return acc_and_f1(preds, labels) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(preds, labels)} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(preds, labels)} elif task_name == "qnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "rte": return {"acc": simple_accuracy(preds, labels)} elif task_name == "wnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "hans": return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name) def xnli_compute_metrics(task_name, preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(xnli_compute_metrics, "sklearn") if len(preds) != len(labels): raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}") if task_name == "xnli": return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name)
file_length: 3,629 | avg_line_length: 35.666667 | max_line_length: 118 | extension_type: py
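A short, hedged example of the deprecated GLUE metric helpers above; it assumes scikit-learn and scipy are installed (the sklearn backend check requires them), the arrays are made-up toy labels, and a FutureWarning is expected because these helpers are slated for removal.

import numpy as np
from transformers.data.metrics import glue_compute_metrics

preds = np.array([1, 0, 1, 1])   # toy binary predictions
labels = np.array([1, 0, 0, 1])  # toy gold labels
# MRPC reports accuracy, F1 and their average:
print(glue_compute_metrics("mrpc", preds, labels))
# {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}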
repo: transformers
file: transformers-main/src/transformers/data/datasets/glue.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures logger = logging.get_logger(__name__) @dataclass class GlueDataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())}) data_dir: str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) def __post_init__(self): self.task_name = self.task_name.lower() class Split(Enum): train = "train" dev = "dev" test = "test" class GlueDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ args: GlueDataTrainingArguments output_mode: str features: List[InputFeatures] def __init__( self, args: GlueDataTrainingArguments, tokenizer: PreTrainedTokenizerBase, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, cache_dir: Optional[str] = None, ): warnings.warn( "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " "library. 
You can have a look at this example script for pointers: " "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py", FutureWarning, ) self.args = args self.processor = glue_processors[args.task_name]() self.output_mode = glue_output_modes[args.task_name] if isinstance(mode, str): try: mode = Split[mode] except KeyError: raise KeyError("mode is not a valid split name") # Load data features from cache or dataset file cached_features_file = os.path.join( cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}", ) label_list = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] self.label_list = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not args.overwrite_cache: start = time.time() self.features = torch.load(cached_features_file) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start ) else: logger.info(f"Creating features from dataset file at {args.data_dir}") if mode == Split.dev: examples = self.processor.get_dev_examples(args.data_dir) elif mode == Split.test: examples = self.processor.get_test_examples(args.data_dir) else: examples = self.processor.get_train_examples(args.data_dir) if limit_length is not None: examples = examples[:limit_length] self.features = glue_convert_examples_to_features( examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, ) start = time.time() torch.save(self.features, cached_features_file) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] def get_labels(self): return self.label_list
file_length: 6,160 | avg_line_length: 37.030864 | max_line_length: 119 | extension_type: py
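A sketch of how GlueDataset is typically constructed; "glue_data/MRPC" is a placeholder directory of GLUE .tsv files (not shipped with the library), and the tokenizer checkpoint is only an example.

from transformers import AutoTokenizer
from transformers.data.datasets import GlueDataset, GlueDataTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = GlueDataTrainingArguments(task_name="mrpc", data_dir="glue_data/MRPC", max_seq_length=128)
# Features are cached next to the data dir on first use; mode accepts "train", "dev" or "test".
train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")
print(len(train_dataset))  # number of InputFeatures
print(train_dataset[0])    # a single InputFeatures record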
repo: transformers
file: transformers-main/src/transformers/data/datasets/squad.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features logger = logging.get_logger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class SquadDataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ model_type: str = field( default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)} ) data_dir: str = field( default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) doc_stride: int = field( default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, ) max_query_length: int = field( default=64, metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) }, ) max_answer_length: int = field( default=30, metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) version_2_with_negative: bool = field( default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} ) null_score_diff_threshold: float = field( default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) n_best_size: int = field( default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) lang_id: int = field( default=0, metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) }, ) threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"}) class Split(Enum): train = "train" dev = "dev" class SquadDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. 
""" args: SquadDataTrainingArguments features: List[SquadFeatures] mode: Split is_language_sensitive: bool def __init__( self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, is_language_sensitive: Optional[bool] = False, cache_dir: Optional[str] = None, dataset_format: Optional[str] = "pt", ): self.args = args self.is_language_sensitive = is_language_sensitive self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() if isinstance(mode, str): try: mode = Split[mode] except KeyError: raise KeyError("mode is not a valid split name") self.mode = mode # Load data features from cache or dataset file version_tag = "v2" if args.version_2_with_negative else "v1" cached_features_file = os.path.join( cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}", ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not args.overwrite_cache: start = time.time() self.old_features = torch.load(cached_features_file) # Legacy cache files have only features, while new cache files # will have dataset and examples also. self.features = self.old_features["features"] self.dataset = self.old_features.get("dataset", None) self.examples = self.old_features.get("examples", None) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run" ) else: if mode == Split.dev: self.examples = self.processor.get_dev_examples(args.data_dir) else: self.examples = self.processor.get_train_examples(args.data_dir) self.features, self.dataset = squad_convert_examples_to_features( examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, ) start = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__(self): return len(self.features) def __getitem__(self, i) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset feature = self.features[i] input_ids = torch.tensor(feature.input_ids, dtype=torch.long) attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long) token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long) cls_index = torch.tensor(feature.cls_index, dtype=torch.long) p_mask = torch.tensor(feature.p_mask, dtype=torch.float) is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float) inputs = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask}) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible}) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)}) if self.mode == Split.train: start_positions = torch.tensor(feature.start_position, dtype=torch.long) end_positions = torch.tensor(feature.end_position, dtype=torch.long) inputs.update({"start_positions": start_positions, "end_positions": end_positions}) return inputs
file_length: 9,219 | avg_line_length: 39.086957 | max_line_length: 118 | extension_type: py
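A hedged sketch of SquadDataset; "squad_data" is a placeholder directory assumed to contain the SQuAD v2 JSON files (train-v2.0.json / dev-v2.0.json), and the tokenizer checkpoint is illustrative.

from transformers import AutoTokenizer
from transformers.data.datasets import SquadDataset, SquadDataTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = SquadDataTrainingArguments(model_type="bert", data_dir="squad_data", version_2_with_negative=True)
dev_dataset = SquadDataset(args, tokenizer=tokenizer, mode="dev")
# Each item is a dict of tensors (input_ids, attention_mask, token_type_ids, ...);
# start/end positions are only added for the train split.
print(dev_dataset[0]["input_ids"].shape)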
repo: transformers
file: transformers-main/src/transformers/data/datasets/language_modeling.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import pickle import random import time import warnings from typing import Dict, List, Optional import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) DEPRECATION_WARNING = ( "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " "library. You can have a look at this example script for pointers: {0}" ) class TextDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ def __init__( self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, overwrite_cache=False, cache_dir: Optional[str] = None, ): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py" ), FutureWarning, ) if os.path.isfile(file_path) is False: raise ValueError(f"Input file path {file_path} not found") block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False) directory, filename = os.path.split(file_path) cached_features_file = os.path.join( cache_dir if cache_dir is not None else directory, f"cached_lm_{tokenizer.__class__.__name__}_{block_size}_{filename}", ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: start = time.time() with open(cached_features_file, "rb") as handle: self.examples = pickle.load(handle) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start ) else: logger.info(f"Creating features from dataset file at {directory}") self.examples = [] with open(file_path, encoding="utf-8") as f: text = f.read() tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text)) for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size self.examples.append( tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size]) ) # Note that we are losing the last truncated example here for the sake of simplicity (no padding) # If your dataset is small, first you should look for a bigger one :-) and second you # can change this behavior by adding (model specific) padding. 
start = time.time() with open(cached_features_file, "wb") as handle: pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL) logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__(self): return len(self.examples) def __getitem__(self, i) -> torch.Tensor: return torch.tensor(self.examples[i], dtype=torch.long) class LineByLineTextDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py" ), FutureWarning, ) if os.path.isfile(file_path) is False: raise ValueError(f"Input file path {file_path} not found") # Here, we do not cache the features, operating under the assumption # that we will soon use fast multithreaded tokenizers from the # `tokenizers` repo everywhere =) logger.info(f"Creating features from dataset file at {file_path}") with open(file_path, encoding="utf-8") as f: lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size) self.examples = batch_encoding["input_ids"] self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples] def __len__(self): return len(self.examples) def __getitem__(self, i) -> Dict[str, torch.tensor]: return self.examples[i] class LineByLineWithRefDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_wwm.py" ), FutureWarning, ) if os.path.isfile(file_path) is False: raise ValueError(f"Input file path {file_path} not found") if os.path.isfile(ref_path) is False: raise ValueError(f"Ref file path {file_path} not found") # Here, we do not cache the features, operating under the assumption # that we will soon use fast multithreaded tokenizers from the # `tokenizers` repo everywhere =) logger.info(f"Creating features from dataset file at {file_path}") logger.info(f"Use ref segment results at {ref_path}") with open(file_path, encoding="utf-8") as f: data = f.readlines() # use this method to avoid delimiter '\u2029' to split a line data = [line.strip() for line in data if len(line) > 0 and not line.isspace()] # Get ref inf from file with open(ref_path, encoding="utf-8") as f: ref = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] if len(data) != len(ref): raise ValueError( f"Length of Input file should be equal to Ref file. 
But the length of {file_path} is {len(data)} " f"while length of {ref_path} is {len(ref)}" ) batch_encoding = tokenizer(data, add_special_tokens=True, truncation=True, max_length=block_size) self.examples = batch_encoding["input_ids"] self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples] n = len(self.examples) for i in range(n): self.examples[i]["chinese_ref"] = torch.tensor(ref[i], dtype=torch.long) def __len__(self): return len(self.examples) def __getitem__(self, i) -> Dict[str, torch.tensor]: return self.examples[i] class LineByLineWithSOPTextDataset(Dataset): """ Dataset for sentence order prediction task, prepare sentence pairs for SOP task """ def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py" ), FutureWarning, ) if os.path.isdir(file_dir) is False: raise ValueError(f"{file_dir} is not a directory") logger.info(f"Creating features from dataset file folder at {file_dir}") self.examples = [] # TODO: randomness could apply a random seed, ex. rng = random.Random(random_seed) # file path looks like ./dataset/wiki_1, ./dataset/wiki_2 for file_name in os.listdir(file_dir): file_path = os.path.join(file_dir, file_name) if os.path.isfile(file_path) is False: raise ValueError(f"{file_path} is not a file") article_open = False with open(file_path, encoding="utf-8") as f: original_lines = f.readlines() article_lines = [] for line in original_lines: if "<doc id=" in line: article_open = True elif "</doc>" in line: article_open = False document = [ tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line)) for line in article_lines[1:] if (len(line) > 0 and not line.isspace()) ] examples = self.create_examples_from_document(document, block_size, tokenizer) self.examples.extend(examples) article_lines = [] else: if article_open: article_lines.append(line) logger.info("Dataset parse finished.") def create_examples_from_document(self, document, block_size, tokenizer, short_seq_prob=0.1): """Creates examples for a single document.""" # Account for special tokens max_num_tokens = block_size - tokenizer.num_special_tokens_to_add(pair=True) # We *usually* want to fill up the entire sequence since we are padding # to `block_size` anyways, so short sequences are generally wasted # computation. However, we *sometimes* # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter # sequences to minimize the mismatch between pretraining and fine-tuning. # The `target_seq_length` is just a rough target however, whereas # `block_size` is a hard limit. target_seq_length = max_num_tokens if random.random() < short_seq_prob: target_seq_length = random.randint(2, max_num_tokens) # We DON'T just concatenate all of the tokens from a document into a long # sequence and choose an arbitrary split point because this would make the # next sentence prediction task too easy. Instead, we split the input into # segments "A" and "B" based on the actual "sentences" provided by the user # input. 
examples = [] current_chunk = [] # a buffer stored current working segments current_length = 0 i = 0 while i < len(document): segment = document[i] # get a segment if not segment: i += 1 continue current_chunk.append(segment) # add a segment to current chunk current_length += len(segment) # overall token length # if current length goes to the target length or reaches the end of file, start building token a and b if i == len(document) - 1 or current_length >= target_seq_length: if current_chunk: # `a_end` is how many segments from `current_chunk` go into the `A` (first) sentence. a_end = 1 # if current chunk has more than 2 sentences, pick part of it `A` (first) sentence if len(current_chunk) >= 2: a_end = random.randint(1, len(current_chunk) - 1) # token a tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) # token b tokens_b = [] for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) if len(tokens_a) == 0 or len(tokens_b) == 0: continue # switch tokens_a and tokens_b randomly if random.random() < 0.5: is_next = False tokens_a, tokens_b = tokens_b, tokens_a else: is_next = True def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens): """Truncates a pair of sequences to a maximum sequence length.""" while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_num_tokens: break trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b if not (len(trunc_tokens) >= 1): raise ValueError("Sequence length to be truncated must be no less than one") # We want to sometimes truncate from the front and sometimes from the # back to add more randomness and avoid biases. if random.random() < 0.5: del trunc_tokens[0] else: trunc_tokens.pop() truncate_seq_pair(tokens_a, tokens_b, max_num_tokens) if not (len(tokens_a) >= 1): raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1") if not (len(tokens_b) >= 1): raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1") # add special tokens input_ids = tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b) # add token type ids, 0 for sentence a, 1 for sentence b token_type_ids = tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b) example = { "input_ids": torch.tensor(input_ids, dtype=torch.long), "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long), "sentence_order_label": torch.tensor(0 if is_next else 1, dtype=torch.long), } examples.append(example) current_chunk = [] # clear current chunk current_length = 0 # reset current text length i += 1 # go to next line return examples def __len__(self): return len(self.examples) def __getitem__(self, i) -> Dict[str, torch.tensor]: return self.examples[i] class TextDatasetForNextSentencePrediction(Dataset): """ This will be superseded by a framework-agnostic approach soon. 
""" def __init__( self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, overwrite_cache=False, short_seq_probability=0.1, nsp_probability=0.5, ): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py" ), FutureWarning, ) if not os.path.isfile(file_path): raise ValueError(f"Input file path {file_path} not found") self.short_seq_probability = short_seq_probability self.nsp_probability = nsp_probability directory, filename = os.path.split(file_path) cached_features_file = os.path.join( directory, f"cached_nsp_{tokenizer.__class__.__name__}_{block_size}_{filename}", ) self.tokenizer = tokenizer # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" # Input file format: # (1) One sentence per line. These should ideally be actual sentences, not # entire paragraphs or arbitrary spans of text. (Because we use the # sentence boundaries for the "next sentence prediction" task). # (2) Blank lines between documents. Document boundaries are needed so # that the "next sentence prediction" task doesn't span between documents. # # Example: # I am very happy. # Here is the second sentence. # # A new document. with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: start = time.time() with open(cached_features_file, "rb") as handle: self.examples = pickle.load(handle) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start ) else: logger.info(f"Creating features from dataset file at {directory}") self.documents = [[]] with open(file_path, encoding="utf-8") as f: while True: line = f.readline() if not line: break line = line.strip() # Empty lines are used as document delimiters if not line and len(self.documents[-1]) != 0: self.documents.append([]) tokens = tokenizer.tokenize(line) tokens = tokenizer.convert_tokens_to_ids(tokens) if tokens: self.documents[-1].append(tokens) logger.info(f"Creating examples from {len(self.documents)} documents.") self.examples = [] for doc_index, document in enumerate(self.documents): self.create_examples_from_document(document, doc_index, block_size) start = time.time() with open(cached_features_file, "wb") as handle: pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL) logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def create_examples_from_document(self, document: List[List[int]], doc_index: int, block_size: int): """Creates examples for a single document.""" max_num_tokens = block_size - self.tokenizer.num_special_tokens_to_add(pair=True) # We *usually* want to fill up the entire sequence since we are padding # to `block_size` anyways, so short sequences are generally wasted # computation. However, we *sometimes* # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter # sequences to minimize the mismatch between pretraining and fine-tuning. # The `target_seq_length` is just a rough target however, whereas # `block_size` is a hard limit. 
target_seq_length = max_num_tokens if random.random() < self.short_seq_probability: target_seq_length = random.randint(2, max_num_tokens) current_chunk = [] # a buffer stored current working segments current_length = 0 i = 0 while i < len(document): segment = document[i] current_chunk.append(segment) current_length += len(segment) if i == len(document) - 1 or current_length >= target_seq_length: if current_chunk: # `a_end` is how many segments from `current_chunk` go into the `A` # (first) sentence. a_end = 1 if len(current_chunk) >= 2: a_end = random.randint(1, len(current_chunk) - 1) tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) tokens_b = [] if len(current_chunk) == 1 or random.random() < self.nsp_probability: is_random_next = True target_b_length = target_seq_length - len(tokens_a) # This should rarely go for more than one iteration for large # corpora. However, just to be careful, we try to make sure that # the random document is not the same as the document # we're processing. for _ in range(10): random_document_index = random.randint(0, len(self.documents) - 1) if random_document_index != doc_index: break random_document = self.documents[random_document_index] random_start = random.randint(0, len(random_document) - 1) for j in range(random_start, len(random_document)): tokens_b.extend(random_document[j]) if len(tokens_b) >= target_b_length: break # We didn't actually use these segments so we "put them back" so # they don't go to waste. num_unused_segments = len(current_chunk) - a_end i -= num_unused_segments # Actual next else: is_random_next = False for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) if not (len(tokens_a) >= 1): raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1") if not (len(tokens_b) >= 1): raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1") # add special tokens input_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b) # add token type ids, 0 for sentence a, 1 for sentence b token_type_ids = self.tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b) example = { "input_ids": torch.tensor(input_ids, dtype=torch.long), "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long), "next_sentence_label": torch.tensor(1 if is_random_next else 0, dtype=torch.long), } self.examples.append(example) current_chunk = [] current_length = 0 i += 1 def __len__(self): return len(self.examples) def __getitem__(self, i): return self.examples[i]
file_length: 23,718
avg_line_length: 43.66855
max_line_length: 121
extension_type: py

repo: transformers
file: transformers-main/src/transformers/data/datasets/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
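A minimal sketch of how these re-exports are typically consumed (the data directory and tokenizer checkpoint are assumptions): `GlueDataTrainingArguments` points a `GlueDataset` at a task's TSV files, and the processor and feature machinery shown further below does the rest.

```python
# Hedged sketch: the legacy GlueDataset wrapper.
# "glue_data/MRPC" is a hypothetical directory holding the MRPC train.tsv/dev.tsv files.
from transformers import BertTokenizer
from transformers.data.datasets import GlueDataset, GlueDataTrainingArguments

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
args = GlueDataTrainingArguments(task_name="mrpc", data_dir="glue_data/MRPC", max_seq_length=128)
train_dataset = GlueDataset(args, tokenizer=tokenizer)  # caches InputFeatures on disk
```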
file_length: 909
avg_line_length: 36.916667
max_line_length: 74
extension_type: py

repo: transformers
file: transformers-main/src/transformers/data/processors/glue.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ GLUE processors and helpers""" import os import warnings from dataclasses import asdict from enum import Enum from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_tf_available, logging from .utils import DataProcessor, InputExample, InputFeatures if is_tf_available(): import tensorflow as tf logger = logging.get_logger(__name__) DEPRECATION_WARNING = ( "This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " "library. You can have a look at this example script for pointers: " "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" ) def glue_convert_examples_to_features( examples: Union[List[InputExample], "tf.data.Dataset"], tokenizer: PreTrainedTokenizer, max_length: Optional[int] = None, task=None, label_list=None, output_mode=None, ): """ Loads a data file into a list of `InputFeatures` Args: examples: List of `InputExamples` or `tf.data.Dataset` containing the examples. tokenizer: Instance of a tokenizer that will tokenize the examples max_length: Maximum example length. Defaults to the tokenizer's max_len task: GLUE task label_list: List of labels. Can be obtained from the processor using the `processor.get_labels()` method output_mode: String indicating the output mode. Either `regression` or `classification` Returns: If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific features. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which can be fed to the model. """ warnings.warn(DEPRECATION_WARNING.format("function"), FutureWarning) if is_tf_available() and isinstance(examples, tf.data.Dataset): if task is None: raise ValueError("When calling glue_convert_examples_to_features from TF, the task parameter is required.") return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task) return _glue_convert_examples_to_features( examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode ) if is_tf_available(): def _tf_glue_convert_examples_to_features( examples: tf.data.Dataset, tokenizer: PreTrainedTokenizer, task=str, max_length: Optional[int] = None, ) -> tf.data.Dataset: """ Returns: A `tf.data.Dataset` containing the task-specific features. 
""" processor = glue_processors[task]() examples = [processor.tfds_map(processor.get_example_from_tensor_dict(example)) for example in examples] features = glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task) label_type = tf.float32 if task == "sts-b" else tf.int64 def gen(): for ex in features: d = {k: v for k, v in asdict(ex).items() if v is not None} label = d.pop("label") yield (d, label) input_names = tokenizer.model_input_names return tf.data.Dataset.from_generator( gen, ({k: tf.int32 for k in input_names}, label_type), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), ) def _glue_convert_examples_to_features( examples: List[InputExample], tokenizer: PreTrainedTokenizer, max_length: Optional[int] = None, task=None, label_list=None, output_mode=None, ): if max_length is None: max_length = tokenizer.model_max_length if task is not None: processor = glue_processors[task]() if label_list is None: label_list = processor.get_labels() logger.info(f"Using label list {label_list} for task {task}") if output_mode is None: output_mode = glue_output_modes[task] logger.info(f"Using output mode {output_mode} for task {task}") label_map = {label: i for i, label in enumerate(label_list)} def label_from_example(example: InputExample) -> Union[int, float, None]: if example.label is None: return None if output_mode == "classification": return label_map[example.label] elif output_mode == "regression": return float(example.label) raise KeyError(output_mode) labels = [label_from_example(example) for example in examples] batch_encoding = tokenizer( [(example.text_a, example.text_b) for example in examples], max_length=max_length, padding="max_length", truncation=True, ) features = [] for i in range(len(examples)): inputs = {k: batch_encoding[k][i] for k in batch_encoding} feature = InputFeatures(**inputs, label=labels[i]) features.append(feature) for i, example in enumerate(examples[:5]): logger.info("*** Example ***") logger.info(f"guid: {example.guid}") logger.info(f"features: {features[i]}") return features class OutputMode(Enum): classification = "classification" regression = "regression" class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) def get_example_from_tensor_dict(self, tensor_dict): """See base class.""" return InputExample( tensor_dict["idx"].numpy(), tensor_dict["sentence1"].numpy().decode("utf-8"), tensor_dict["sentence2"].numpy().decode("utf-8"), str(tensor_dict["label"].numpy()), ) def get_train_examples(self, data_dir): """See base class.""" logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}") return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training, dev and test sets.""" examples = [] for i, line in enumerate(lines): if i == 0: continue guid = f"{set_type}-{i}" text_a = line[3] text_b = line[4] label = None if set_type == "test" else line[0] 
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) def get_example_from_tensor_dict(self, tensor_dict): """See base class.""" return InputExample( tensor_dict["idx"].numpy(), tensor_dict["premise"].numpy().decode("utf-8"), tensor_dict["hypothesis"].numpy().decode("utf-8"), str(tensor_dict["label"].numpy()), ) def get_train_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training, dev and test sets.""" examples = [] for i, line in enumerate(lines): if i == 0: continue guid = f"{set_type}-{line[0]}" text_a = line[8] text_b = line[9] label = None if set_type.startswith("test") else line[-1] examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MnliMismatchedProcessor(MnliProcessor): """Processor for the MultiNLI Mismatched data set (GLUE version).""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_mismatched") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")), "test_mismatched") class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) def get_example_from_tensor_dict(self, tensor_dict): """See base class.""" return InputExample( tensor_dict["idx"].numpy(), tensor_dict["sentence"].numpy().decode("utf-8"), None, str(tensor_dict["label"].numpy()), ) def get_train_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training, dev and test sets.""" test_mode = set_type == "test" if test_mode: lines = lines[1:] text_index = 1 if test_mode else 3 examples = [] for i, line in enumerate(lines): guid = f"{set_type}-{i}" text_a = line[text_index] label = None if test_mode else line[1] examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return 
examples class Sst2Processor(DataProcessor): """Processor for the SST-2 data set (GLUE version).""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) def get_example_from_tensor_dict(self, tensor_dict): """See base class.""" return InputExample( tensor_dict["idx"].numpy(), tensor_dict["sentence"].numpy().decode("utf-8"), None, str(tensor_dict["label"].numpy()), ) def get_train_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training, dev and test sets.""" examples = [] text_index = 1 if set_type == "test" else 0 for i, line in enumerate(lines): if i == 0: continue guid = f"{set_type}-{i}" text_a = line[text_index] label = None if set_type == "test" else line[1] examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class StsbProcessor(DataProcessor): """Processor for the STS-B data set (GLUE version).""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) def get_example_from_tensor_dict(self, tensor_dict): """See base class.""" return InputExample( tensor_dict["idx"].numpy(), tensor_dict["sentence1"].numpy().decode("utf-8"), tensor_dict["sentence2"].numpy().decode("utf-8"), str(tensor_dict["label"].numpy()), ) def get_train_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return [None] def _create_examples(self, lines, set_type): """Creates examples for the training, dev and test sets.""" examples = [] for i, line in enumerate(lines): if i == 0: continue guid = f"{set_type}-{line[0]}" text_a = line[7] text_b = line[8] label = None if set_type == "test" else line[-1] examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QqpProcessor(DataProcessor): """Processor for the QQP data set (GLUE version).""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) def get_example_from_tensor_dict(self, tensor_dict): """See base class.""" return InputExample( tensor_dict["idx"].numpy(), tensor_dict["question1"].numpy().decode("utf-8"), tensor_dict["question2"].numpy().decode("utf-8"), str(tensor_dict["label"].numpy()), ) def get_train_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), 
"dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training, dev and test sets.""" test_mode = set_type == "test" q1_index = 1 if test_mode else 3 q2_index = 2 if test_mode else 4 examples = [] for i, line in enumerate(lines): if i == 0: continue guid = f"{set_type}-{line[0]}" try: text_a = line[q1_index] text_b = line[q2_index] label = None if test_mode else line[5] except IndexError: continue examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QnliProcessor(DataProcessor): """Processor for the QNLI data set (GLUE version).""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) def get_example_from_tensor_dict(self, tensor_dict): """See base class.""" return InputExample( tensor_dict["idx"].numpy(), tensor_dict["question"].numpy().decode("utf-8"), tensor_dict["sentence"].numpy().decode("utf-8"), str(tensor_dict["label"].numpy()), ) def get_train_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training, dev and test sets.""" examples = [] for i, line in enumerate(lines): if i == 0: continue guid = f"{set_type}-{line[0]}" text_a = line[1] text_b = line[2] label = None if set_type == "test" else line[-1] examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class RteProcessor(DataProcessor): """Processor for the RTE data set (GLUE version).""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) def get_example_from_tensor_dict(self, tensor_dict): """See base class.""" return InputExample( tensor_dict["idx"].numpy(), tensor_dict["sentence1"].numpy().decode("utf-8"), tensor_dict["sentence2"].numpy().decode("utf-8"), str(tensor_dict["label"].numpy()), ) def get_train_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training, dev and test sets.""" examples = [] for i, line in enumerate(lines): if i == 0: continue guid = f"{set_type}-{line[0]}" text_a = line[1] text_b = line[2] label = None if set_type == "test" else line[-1] examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return 
examples class WnliProcessor(DataProcessor): """Processor for the WNLI data set (GLUE version).""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) def get_example_from_tensor_dict(self, tensor_dict): """See base class.""" return InputExample( tensor_dict["idx"].numpy(), tensor_dict["sentence1"].numpy().decode("utf-8"), tensor_dict["sentence2"].numpy().decode("utf-8"), str(tensor_dict["label"].numpy()), ) def get_train_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training, dev and test sets.""" examples = [] for i, line in enumerate(lines): if i == 0: continue guid = f"{set_type}-{line[0]}" text_a = line[1] text_b = line[2] label = None if set_type == "test" else line[-1] examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples glue_tasks_num_labels = { "cola": 2, "mnli": 3, "mrpc": 2, "sst-2": 2, "sts-b": 1, "qqp": 2, "qnli": 2, "rte": 2, "wnli": 2, } glue_processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mnli-mm": MnliMismatchedProcessor, "mrpc": MrpcProcessor, "sst-2": Sst2Processor, "sts-b": StsbProcessor, "qqp": QqpProcessor, "qnli": QnliProcessor, "rte": RteProcessor, "wnli": WnliProcessor, } glue_output_modes = { "cola": "classification", "mnli": "classification", "mnli-mm": "classification", "mrpc": "classification", "sst-2": "classification", "sts-b": "regression", "qqp": "classification", "qnli": "classification", "rte": "classification", "wnli": "classification", }
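All of these processors share the same interface, so a hedged end-to-end sketch for one task looks as follows; the `glue_data/MRPC` directory and the tokenizer checkpoint are assumptions rather than values taken from this file.

```python
# Hedged sketch of the deprecated GLUE pipeline (the library now recommends 🤗 Datasets).
# "glue_data/MRPC" is a hypothetical directory holding the MRPC TSV files.
from transformers import BertTokenizer
from transformers.data.processors.glue import MrpcProcessor, glue_convert_examples_to_features

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
processor = MrpcProcessor()

train_examples = processor.get_train_examples("glue_data/MRPC")
features = glue_convert_examples_to_features(
    train_examples,
    tokenizer,
    max_length=128,
    task="mrpc",  # pulls the label list and output mode from glue_processors / glue_output_modes
)

# Each InputFeatures holds padded input_ids, attention_mask, token_type_ids and an int label.
print(features[0].input_ids[:10], features[0].label)
```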
file_length: 23,216
avg_line_length: 35.051242
max_line_length: 119
extension_type: py

repo: transformers
file: transformers-main/src/transformers/data/processors/squad.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from functools import partial from multiprocessing import Pool, cpu_count import numpy as np from tqdm import tqdm from ...models.bert.tokenization_bert import whitespace_tokenize from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy from ...utils import is_tf_available, is_torch_available, logging from .utils import DataProcessor # Store the tokenizers which insert 2 separators tokens MULTI_SEP_TOKENS_TOKENIZERS_SET = {"roberta", "camembert", "bart", "mpnet"} if is_torch_available(): import torch from torch.utils.data import TensorDataset if is_tf_available(): import tensorflow as tf logger = logging.get_logger(__name__) def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start : (new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" best_score = None best_span_index = None for span_index, doc_span in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index def _new_check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # if len(doc_spans) == 1: # return True best_score = None best_span_index = None for span_index, doc_span in enumerate(doc_spans): end = doc_span["start"] + doc_span["length"] - 1 if position < doc_span["start"]: continue if position > end: continue num_left_context = position - doc_span["start"] num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"] if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index def _is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False def squad_convert_example_to_features( example, max_seq_length, doc_stride, max_query_length, padding_strategy, is_training ): features = [] if is_training and not example.is_impossible: # Get start and end position start_position = example.start_position end_position = example.end_position # If the answer cannot be found in the text, then skip this 
example. actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)]) cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text)) if actual_text.find(cleaned_answer_text) == -1: logger.warning(f"Could not find answer: '{actual_text}' vs. '{cleaned_answer_text}'") return [] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for i, token in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) if tokenizer.__class__.__name__ in [ "RobertaTokenizer", "LongformerTokenizer", "BartTokenizer", "RobertaTokenizerFast", "LongformerTokenizerFast", "BartTokenizerFast", ]: sub_tokens = tokenizer.tokenize(token, add_prefix_space=True) else: sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text ) spans = [] truncated_query = tokenizer.encode( example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length ) # Tokenizers who insert 2 SEP tokens in-between <context> & <question> need to have special handling # in the way they compute mask of added tokens. tokenizer_type = type(tokenizer).__name__.replace("Tokenizer", "").lower() sequence_added_tokens = ( tokenizer.model_max_length - tokenizer.max_len_single_sentence + 1 if tokenizer_type in MULTI_SEP_TOKENS_TOKENIZERS_SET else tokenizer.model_max_length - tokenizer.max_len_single_sentence ) sequence_pair_added_tokens = tokenizer.model_max_length - tokenizer.max_len_sentences_pair span_doc_tokens = all_doc_tokens while len(spans) * doc_stride < len(all_doc_tokens): # Define the side we want to truncate / pad and the text/pair sorting if tokenizer.padding_side == "right": texts = truncated_query pairs = span_doc_tokens truncation = TruncationStrategy.ONLY_SECOND.value else: texts = span_doc_tokens pairs = truncated_query truncation = TruncationStrategy.ONLY_FIRST.value encoded_dict = tokenizer.encode_plus( # TODO(thom) update this logic texts, pairs, truncation=truncation, padding=padding_strategy, max_length=max_seq_length, return_overflowing_tokens=True, stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens, return_token_type_ids=True, ) paragraph_len = min( len(all_doc_tokens) - len(spans) * doc_stride, max_seq_length - len(truncated_query) - sequence_pair_added_tokens, ) if tokenizer.pad_token_id in encoded_dict["input_ids"]: if tokenizer.padding_side == "right": non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)] else: last_padding_id_position = ( len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id) ) non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :] else: non_padded_ids = encoded_dict["input_ids"] tokens = tokenizer.convert_ids_to_tokens(non_padded_ids) token_to_orig_map = {} for i in range(paragraph_len): index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i] encoded_dict["paragraph_len"] = 
paragraph_len encoded_dict["tokens"] = tokens encoded_dict["token_to_orig_map"] = token_to_orig_map encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens encoded_dict["token_is_max_context"] = {} encoded_dict["start"] = len(spans) * doc_stride encoded_dict["length"] = paragraph_len spans.append(encoded_dict) if "overflowing_tokens" not in encoded_dict or ( "overflowing_tokens" in encoded_dict and len(encoded_dict["overflowing_tokens"]) == 0 ): break span_doc_tokens = encoded_dict["overflowing_tokens"] for doc_span_index in range(len(spans)): for j in range(spans[doc_span_index]["paragraph_len"]): is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j) index = ( j if tokenizer.padding_side == "left" else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j ) spans[doc_span_index]["token_is_max_context"][index] = is_max_context for span in spans: # Identify the position of the CLS token cls_index = span["input_ids"].index(tokenizer.cls_token_id) # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) # Original TF implementation also keep the classification token (set to 0) p_mask = np.ones_like(span["token_type_ids"]) if tokenizer.padding_side == "right": p_mask[len(truncated_query) + sequence_added_tokens :] = 0 else: p_mask[-len(span["tokens"]) : -(len(truncated_query) + sequence_added_tokens)] = 0 pad_token_indices = np.where(span["input_ids"] == tokenizer.pad_token_id) special_token_indices = np.asarray( tokenizer.get_special_tokens_mask(span["input_ids"], already_has_special_tokens=True) ).nonzero() p_mask[pad_token_indices] = 1 p_mask[special_token_indices] = 1 # Set the cls index to 0: the CLS index can be used for impossible answers p_mask[cls_index] = 0 span_is_impossible = example.is_impossible start_position = 0 end_position = 0 if is_training and not span_is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. doc_start = span["start"] doc_end = span["start"] + span["length"] - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = cls_index end_position = cls_index span_is_impossible = True else: if tokenizer.padding_side == "left": doc_offset = 0 else: doc_offset = len(truncated_query) + sequence_added_tokens start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset features.append( SquadFeatures( span["input_ids"], span["attention_mask"], span["token_type_ids"], cls_index, p_mask.tolist(), example_index=0, # Can not set unique_id and example_index here. They will be set after multiple processing. 
unique_id=0, paragraph_len=span["paragraph_len"], token_is_max_context=span["token_is_max_context"], tokens=span["tokens"], token_to_orig_map=span["token_to_orig_map"], start_position=start_position, end_position=end_position, is_impossible=span_is_impossible, qas_id=example.qas_id, ) ) return features def squad_convert_example_to_features_init(tokenizer_for_convert: PreTrainedTokenizerBase): global tokenizer tokenizer = tokenizer_for_convert def squad_convert_examples_to_features( examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, padding_strategy="max_length", return_dataset=False, threads=1, tqdm_enabled=True, ): """ Converts a list of examples into a list of features that can be directly given as input to a model. It is model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs. Args: examples: list of [`~data.processors.squad.SquadExample`] tokenizer: an instance of a child of [`PreTrainedTokenizer`] max_seq_length: The maximum sequence length of the inputs. doc_stride: The stride used when the context is too large and is split across several features. max_query_length: The maximum length of the query. is_training: whether to create features for model evaluation or model training. padding_strategy: Default to "max_length". Which padding strategy to use return_dataset: Default False. Either 'pt' or 'tf'. if 'pt': returns a torch.data.TensorDataset, if 'tf': returns a tf.data.Dataset threads: multiple processing threads. Returns: list of [`~data.processors.squad.SquadFeatures`] Example: ```python processor = SquadV2Processor() examples = processor.get_dev_examples(data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, ) ```""" # Defining helper methods features = [] threads = min(threads, cpu_count()) with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p: annotate_ = partial( squad_convert_example_to_features, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, padding_strategy=padding_strategy, is_training=is_training, ) features = list( tqdm( p.imap(annotate_, examples, chunksize=32), total=len(examples), desc="convert squad examples to features", disable=not tqdm_enabled, ) ) new_features = [] unique_id = 1000000000 example_index = 0 for example_features in tqdm( features, total=len(features), desc="add example index and unique id", disable=not tqdm_enabled ): if not example_features: continue for example_feature in example_features: example_feature.example_index = example_index example_feature.unique_id = unique_id new_features.append(example_feature) unique_id += 1 example_index += 1 features = new_features del new_features if return_dataset == "pt": if not is_torch_available(): raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.") # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) all_is_impossible = torch.tensor([f.is_impossible for f in 
features], dtype=torch.float) if not is_training: all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long) dataset = TensorDataset( all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask ) else: all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) dataset = TensorDataset( all_input_ids, all_attention_masks, all_token_type_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask, all_is_impossible, ) return features, dataset elif return_dataset == "tf": if not is_tf_available(): raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.") def gen(): for i, ex in enumerate(features): if ex.token_type_ids is None: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "feature_index": i, "qas_id": ex.qas_id, }, { "start_positions": ex.start_position, "end_positions": ex.end_position, "cls_index": ex.cls_index, "p_mask": ex.p_mask, "is_impossible": ex.is_impossible, }, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, "feature_index": i, "qas_id": ex.qas_id, }, { "start_positions": ex.start_position, "end_positions": ex.end_position, "cls_index": ex.cls_index, "p_mask": ex.p_mask, "is_impossible": ex.is_impossible, }, ) # Why have we split the batch into a tuple? PyTorch just has a list of tensors. if "token_type_ids" in tokenizer.model_input_names: train_types = ( { "input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32, "feature_index": tf.int64, "qas_id": tf.string, }, { "start_positions": tf.int64, "end_positions": tf.int64, "cls_index": tf.int64, "p_mask": tf.int32, "is_impossible": tf.int32, }, ) train_shapes = ( { "input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None]), "token_type_ids": tf.TensorShape([None]), "feature_index": tf.TensorShape([]), "qas_id": tf.TensorShape([]), }, { "start_positions": tf.TensorShape([]), "end_positions": tf.TensorShape([]), "cls_index": tf.TensorShape([]), "p_mask": tf.TensorShape([None]), "is_impossible": tf.TensorShape([]), }, ) else: train_types = ( {"input_ids": tf.int32, "attention_mask": tf.int32, "feature_index": tf.int64, "qas_id": tf.string}, { "start_positions": tf.int64, "end_positions": tf.int64, "cls_index": tf.int64, "p_mask": tf.int32, "is_impossible": tf.int32, }, ) train_shapes = ( { "input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None]), "feature_index": tf.TensorShape([]), "qas_id": tf.TensorShape([]), }, { "start_positions": tf.TensorShape([]), "end_positions": tf.TensorShape([]), "cls_index": tf.TensorShape([]), "p_mask": tf.TensorShape([None]), "is_impossible": tf.TensorShape([]), }, ) return tf.data.Dataset.from_generator(gen, train_types, train_shapes) else: return features class SquadProcessor(DataProcessor): """ Processor for the SQuAD data set. overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and version 2.0 of SQuAD, respectively. 
""" train_file = None dev_file = None def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False): if not evaluate: answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8") answer_start = tensor_dict["answers"]["answer_start"][0].numpy() answers = [] else: answers = [ {"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")} for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"]) ] answer = None answer_start = None return SquadExample( qas_id=tensor_dict["id"].numpy().decode("utf-8"), question_text=tensor_dict["question"].numpy().decode("utf-8"), context_text=tensor_dict["context"].numpy().decode("utf-8"), answer_text=answer, start_position_character=answer_start, title=tensor_dict["title"].numpy().decode("utf-8"), answers=answers, ) def get_examples_from_dataset(self, dataset, evaluate=False): """ Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset. Args: dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")* evaluate: Boolean specifying if in evaluation mode or in training mode Returns: List of SquadExample Examples: ```python >>> import tensorflow_datasets as tfds >>> dataset = tfds.load("squad") >>> training_examples = get_examples_from_dataset(dataset, evaluate=False) >>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True) ```""" if evaluate: dataset = dataset["validation"] else: dataset = dataset["train"] examples = [] for tensor_dict in tqdm(dataset): examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate)) return examples def get_train_examples(self, data_dir, filename=None): """ Returns the training examples from the data directory. Args: data_dir: Directory containing the data files used for training and evaluating. filename: None by default, specify this if the training file has a different name than the original one which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively. """ if data_dir is None: data_dir = "" if self.train_file is None: raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor") with open( os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8" ) as reader: input_data = json.load(reader)["data"] return self._create_examples(input_data, "train") def get_dev_examples(self, data_dir, filename=None): """ Returns the evaluation example from the data directory. Args: data_dir: Directory containing the data files used for training and evaluating. filename: None by default, specify this if the evaluation file has a different name than the original one which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively. 
""" if data_dir is None: data_dir = "" if self.dev_file is None: raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor") with open( os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8" ) as reader: input_data = json.load(reader)["data"] return self._create_examples(input_data, "dev") def _create_examples(self, input_data, set_type): is_training = set_type == "train" examples = [] for entry in tqdm(input_data): title = entry["title"] for paragraph in entry["paragraphs"]: context_text = paragraph["context"] for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position_character = None answer_text = None answers = [] is_impossible = qa.get("is_impossible", False) if not is_impossible: if is_training: answer = qa["answers"][0] answer_text = answer["text"] start_position_character = answer["answer_start"] else: answers = qa["answers"] example = SquadExample( qas_id=qas_id, question_text=question_text, context_text=context_text, answer_text=answer_text, start_position_character=start_position_character, title=title, is_impossible=is_impossible, answers=answers, ) examples.append(example) return examples class SquadV1Processor(SquadProcessor): train_file = "train-v1.1.json" dev_file = "dev-v1.1.json" class SquadV2Processor(SquadProcessor): train_file = "train-v2.0.json" dev_file = "dev-v2.0.json" class SquadExample: """ A single training/test example for the Squad dataset, as loaded from disk. Args: qas_id: The example's unique identifier question_text: The question string context_text: The context string answer_text: The answer string start_position_character: The character position of the start of the answer title: The title of the example answers: None by default, this is used during evaluation. Holds answers as well as their start positions. is_impossible: False by default, set to True if the example has no possible answer. """ def __init__( self, qas_id, question_text, context_text, answer_text, start_position_character, title, answers=[], is_impossible=False, ): self.qas_id = qas_id self.question_text = question_text self.context_text = context_text self.answer_text = answer_text self.title = title self.is_impossible = is_impossible self.answers = answers self.start_position, self.end_position = 0, 0 doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True # Split on whitespace so that different tokens may be attributed to their original position. for c in self.context_text: if _is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) self.doc_tokens = doc_tokens self.char_to_word_offset = char_to_word_offset # Start and end positions only has a value during evaluation. if start_position_character is not None and not is_impossible: self.start_position = char_to_word_offset[start_position_character] self.end_position = char_to_word_offset[ min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1) ] class SquadFeatures: """ Single squad example features to be fed to a model. Those features are model-specific and can be crafted from [`~data.processors.squad.SquadExample`] using the :method:*~transformers.data.processors.squad.squad_convert_examples_to_features* method. Args: input_ids: Indices of input sequence tokens in the vocabulary. 
attention_mask: Mask to avoid performing attention on padding token indices. token_type_ids: Segment token indices to indicate first and second portions of the inputs. cls_index: the index of the CLS token. p_mask: Mask identifying tokens that can be answers vs. tokens that cannot. Mask with 1 for tokens than cannot be in the answer and 0 for token that can be in an answer example_index: the index of the example unique_id: The unique Feature identifier paragraph_len: The length of the context token_is_max_context: List of booleans identifying which tokens have their maximum context in this feature object. If a token does not have their maximum context in this feature object, it means that another feature object has more information related to that token and should be prioritized over this feature for that token. tokens: list of tokens corresponding to the input ids token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer. start_position: start of the answer token index end_position: end of the answer token index encoding: optionally store the BatchEncoding with the fast-tokenizer alignment methods. """ def __init__( self, input_ids, attention_mask, token_type_ids, cls_index, p_mask, example_index, unique_id, paragraph_len, token_is_max_context, tokens, token_to_orig_map, start_position, end_position, is_impossible, qas_id: str = None, encoding: BatchEncoding = None, ): self.input_ids = input_ids self.attention_mask = attention_mask self.token_type_ids = token_type_ids self.cls_index = cls_index self.p_mask = p_mask self.example_index = example_index self.unique_id = unique_id self.paragraph_len = paragraph_len self.token_is_max_context = token_is_max_context self.tokens = tokens self.token_to_orig_map = token_to_orig_map self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible self.qas_id = qas_id self.encoding = encoding class SquadResult: """ Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset. Args: unique_id: The unique identifier corresponding to that example. start_logits: The logits corresponding to the start of the answer end_logits: The logits corresponding to the end of the answer """ def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None): self.start_logits = start_logits self.end_logits = end_logits self.unique_id = unique_id if start_top_index: self.start_top_index = start_top_index self.end_top_index = end_top_index self.cls_logits = cls_logits
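The docstring example above only hints at the full call; here is a hedged sketch of the evaluation path (the `squad_data` directory and checkpoint name are assumptions) that also materializes a PyTorch `TensorDataset`:

```python
# Hedged sketch: SQuAD v2.0 dev set -> features -> PyTorch TensorDataset.
# "squad_data" is a hypothetical directory containing dev-v2.0.json; requires torch.
from transformers import BertTokenizer
from transformers.data.processors.squad import SquadV2Processor, squad_convert_examples_to_features

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
processor = SquadV2Processor()
examples = processor.get_dev_examples("squad_data")

features, dataset = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=384,
    doc_stride=128,
    max_query_length=64,
    is_training=False,
    return_dataset="pt",  # "tf" returns a tf.data.Dataset; False returns features only
)

# In evaluation mode the TensorDataset holds input_ids, attention_mask, token_type_ids,
# feature_index, cls_index and p_mask, in that order.
print(len(features), len(dataset))
```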
file_length: 33,153
avg_line_length: 38.189125
max_line_length: 125
extension_type: py

repo: transformers
file: transformers-main/src/transformers/data/processors/utils.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import dataclasses import json from dataclasses import dataclass from typing import List, Optional, Union from ...utils import is_tf_available, is_torch_available, logging logger = logging.get_logger(__name__) @dataclass class InputExample: """ A single training/test example for simple sequence classification. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ guid: str text_a: str text_b: Optional[str] = None label: Optional[str] = None def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(dataclasses.asdict(self), indent=2) + "\n" @dataclass(frozen=True) class InputFeatures: """ A single set of features of data. Property names are the same names as the corresponding inputs to a model. Args: input_ids: Indices of input sequence tokens in the vocabulary. attention_mask: Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded) tokens. token_type_ids: (Optional) Segment token indices to indicate first and second portions of the inputs. Only some models use them. label: (Optional) Label corresponding to the input. Int for classification problems, float for regression problems. """ input_ids: List[int] attention_mask: Optional[List[int]] = None token_type_ids: Optional[List[int]] = None label: Optional[Union[int, float]] = None def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(dataclasses.asdict(self)) + "\n" class DataProcessor: """Base class for data converters for sequence classification data sets.""" def get_example_from_tensor_dict(self, tensor_dict): """ Gets an example from a dict with tensorflow tensors. Args: tensor_dict: Keys and values should match the corresponding Glue tensorflow_dataset examples. """ raise NotImplementedError() def get_train_examples(self, data_dir): """Gets a collection of [`InputExample`] for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of [`InputExample`] for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of [`InputExample`] for the test set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() def tfds_map(self, example): """ Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. 
This method converts examples to the correct format. """ if len(self.get_labels()) > 1: example.label = self.get_labels()[int(example.label)] return example @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with open(input_file, "r", encoding="utf-8-sig") as f: return list(csv.reader(f, delimiter="\t", quotechar=quotechar)) class SingleSentenceClassificationProcessor(DataProcessor): """Generic processor for a single sentence classification data set.""" def __init__(self, labels=None, examples=None, mode="classification", verbose=False): self.labels = [] if labels is None else labels self.examples = [] if examples is None else examples self.mode = mode self.verbose = verbose def __len__(self): return len(self.examples) def __getitem__(self, idx): if isinstance(idx, slice): return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx]) return self.examples[idx] @classmethod def create_from_csv( cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs ): processor = cls(**kwargs) processor.add_examples_from_csv( file_name, split_name=split_name, column_label=column_label, column_text=column_text, column_id=column_id, skip_first_row=skip_first_row, overwrite_labels=True, overwrite_examples=True, ) return processor @classmethod def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs): processor = cls(**kwargs) processor.add_examples(texts_or_text_and_labels, labels=labels) return processor def add_examples_from_csv( self, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, overwrite_labels=False, overwrite_examples=False, ): lines = self._read_tsv(file_name) if skip_first_row: lines = lines[1:] texts = [] labels = [] ids = [] for i, line in enumerate(lines): texts.append(line[column_text]) labels.append(line[column_label]) if column_id is not None: ids.append(line[column_id]) else: guid = f"{split_name}-{i}" if split_name else str(i) ids.append(guid) return self.add_examples( texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples ) def add_examples( self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False ): if labels is not None and len(texts_or_text_and_labels) != len(labels): raise ValueError( f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}" ) if ids is not None and len(texts_or_text_and_labels) != len(ids): raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}") if ids is None: ids = [None] * len(texts_or_text_and_labels) if labels is None: labels = [None] * len(texts_or_text_and_labels) examples = [] added_labels = set() for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids): if isinstance(text_or_text_and_label, (tuple, list)) and label is None: text, label = text_or_text_and_label else: text = text_or_text_and_label added_labels.add(label) examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label)) # Update examples if overwrite_examples: self.examples = examples else: self.examples.extend(examples) # Update labels if overwrite_labels: self.labels = list(added_labels) else: self.labels = list(set(self.labels).union(added_labels)) return self.examples def get_features( self, tokenizer, max_length=None, pad_on_left=False, pad_token=0, mask_padding_with_zero=True, 
return_tensors=None, ): """ Convert examples in a list of `InputFeatures` Args: tokenizer: Instance of a tokenizer that will tokenize the examples max_length: Maximum example length pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default) pad_token: Padding token mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual values) Returns: If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific features. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which can be fed to the model. """ if max_length is None: max_length = tokenizer.max_len label_map = {label: i for i, label in enumerate(self.labels)} all_input_ids = [] for ex_index, example in enumerate(self.examples): if ex_index % 10000 == 0: logger.info(f"Tokenizing example {ex_index}") input_ids = tokenizer.encode( example.text_a, add_special_tokens=True, max_length=min(max_length, tokenizer.max_len), ) all_input_ids.append(input_ids) batch_length = max(len(input_ids) for input_ids in all_input_ids) features = [] for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)): if ex_index % 10000 == 0: logger.info(f"Writing example {ex_index}/{len(self.examples)}") # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. padding_length = batch_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask else: input_ids = input_ids + ([pad_token] * padding_length) attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length) if len(input_ids) != batch_length: raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}") if len(attention_mask) != batch_length: raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}") if self.mode == "classification": label = label_map[example.label] elif self.mode == "regression": label = float(example.label) else: raise ValueError(self.mode) if ex_index < 5 and self.verbose: logger.info("*** Example ***") logger.info(f"guid: {example.guid}") logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}") logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}") logger.info(f"label: {example.label} (id = {label})") features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label)) if return_tensors is None: return features elif return_tensors == "tf": if not is_tf_available(): raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported") import tensorflow as tf def gen(): for ex in features: yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label) dataset = tf.data.Dataset.from_generator( gen, ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])), ) return dataset elif return_tensors == "pt": if not is_torch_available(): raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported") import torch from torch.utils.data import TensorDataset 
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) if self.mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif self.mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels) return dataset else: raise ValueError("return_tensors should be one of 'tf' or 'pt'")
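`SingleSentenceClassificationProcessor` is the one processor here meant to be fed in-memory data; a hedged sketch follows (the example sentences and labels are made up).

```python
# Hedged sketch: building InputExamples from in-memory (text, label) pairs.
from transformers.data.processors.utils import InputExample, SingleSentenceClassificationProcessor

processor = SingleSentenceClassificationProcessor.create_from_examples(
    [("this movie was great", "pos"), ("what a waste of time", "neg")]
)
print(len(processor), processor.labels)  # 2 examples; label set order is not guaranteed
print(processor[0])                      # an InputExample with guid / text_a / label

# InputExample and InputFeatures are plain dataclasses and can also be built directly.
example = InputExample(guid="demo-0", text_a="a standalone sentence", label="pos")
print(example.to_json_string())
```

Note that `get_features` above still reads the long-deprecated `tokenizer.max_len` attribute, so it appears to work only with old tokenizer versions; with current tokenizers it is safer to tokenize the returned examples directly.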
13,829
38.514286
118
py
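The `get_features` fragment in the record above pads every example up to the longest sequence in the batch and builds an attention mask with `1` for real tokens and `0` for padding (inverted when `mask_padding_with_zero=False`). A minimal standalone sketch of that padding scheme, using toy token ids and a hypothetical `pad_batch` helper rather than the processor's own API:

```python
# Minimal sketch (not the library's API): pad each sequence to the longest in
# the batch, build an attention mask with 1 for real tokens and 0 for padding,
# and optionally pad on the left, mirroring the logic in the record above.
def pad_batch(batch_input_ids, pad_token=0, pad_on_left=False):
    batch_length = max(len(ids) for ids in batch_input_ids)
    padded_ids, attention_masks = [], []
    for ids in batch_input_ids:
        padding = [pad_token] * (batch_length - len(ids))
        mask_padding = [0] * (batch_length - len(ids))
        if pad_on_left:
            padded_ids.append(padding + ids)
            attention_masks.append(mask_padding + [1] * len(ids))
        else:
            padded_ids.append(ids + padding)
            attention_masks.append([1] * len(ids) + mask_padding)
    return padded_ids, attention_masks


# Two toy sequences of different lengths (the ids are placeholders).
ids, masks = pad_batch([[101, 7592, 102], [101, 102]])
assert ids == [[101, 7592, 102], [101, 102, 0]]
assert masks == [[1, 1, 1], [1, 1, 0]]
```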
transformers
transformers-main/src/transformers/data/processors/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
1,014
52.421053
118
py
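Since the record above only re-exports the processor utilities, a one-line import is the whole public surface. A hedged usage sketch; the `"mrpc"` key is just one of the GLUE tasks registered in `glue_processors`, and the exact label strings vary by task:

```python
# These names are exactly the ones re-exported by the __init__ module above.
from transformers.data.processors import InputExample, glue_output_modes, glue_processors

processor = glue_processors["mrpc"]()  # pick one registered GLUE task
print(glue_output_modes["mrpc"])       # -> "classification"
print(processor.get_labels())          # the task's label set

# Build a single example by hand (texts and guid are placeholders).
example = InputExample(
    guid="demo-1", text_a="first sentence", text_b="second sentence", label=processor.get_labels()[0]
)
print(example.guid, example.label)
```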
transformers
transformers-main/src/transformers/data/processors/xnli.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ XNLI utils (dataset loading and evaluation)""" import os from ...utils import logging from .utils import DataProcessor, InputExample logger = logging.get_logger(__name__) class XnliProcessor(DataProcessor): """ Processor for the XNLI dataset. Adapted from https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207 """ def __init__(self, language, train_language=None): self.language = language self.train_language = train_language def get_train_examples(self, data_dir): """See base class.""" lg = self.language if self.train_language is None else self.train_language lines = self._read_tsv(os.path.join(data_dir, f"XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv")) examples = [] for i, line in enumerate(lines): if i == 0: continue guid = f"train-{i}" text_a = line[0] text_b = line[1] label = "contradiction" if line[2] == "contradictory" else line[2] if not isinstance(text_a, str): raise ValueError(f"Training input {text_a} is not a string") if not isinstance(text_b, str): raise ValueError(f"Training input {text_b} is not a string") if not isinstance(label, str): raise ValueError(f"Training label {label} is not a string") examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_test_examples(self, data_dir): """See base class.""" lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv")) examples = [] for i, line in enumerate(lines): if i == 0: continue language = line[0] if language != self.language: continue guid = f"test-{i}" text_a = line[6] text_b = line[7] label = line[1] if not isinstance(text_a, str): raise ValueError(f"Training input {text_a} is not a string") if not isinstance(text_b, str): raise ValueError(f"Training input {text_b} is not a string") if not isinstance(label, str): raise ValueError(f"Training label {label} is not a string") examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] xnli_processors = { "xnli": XnliProcessor, } xnli_output_modes = { "xnli": "classification", } xnli_tasks_num_labels = { "xnli": 3, }
3,489
34.612245
112
py
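The `XnliProcessor` record above hard-codes the XNLI file layout it expects. A hedged usage sketch, assuming the XNLI-MT-1.0 and XNLI-1.0 archives have already been extracted under a placeholder `data_dir`:

```python
# Hedged sketch: "de"/"en" and data_dir are placeholder choices, and data_dir must
# contain the files hard-coded in XnliProcessor (multinli.train.en.tsv under
# XNLI-MT-1.0/multinli/ and xnli.test.tsv under XNLI-1.0/).
from transformers.data.processors.xnli import XnliProcessor

processor = XnliProcessor(language="de", train_language="en")
print(processor.get_labels())  # ['contradiction', 'entailment', 'neutral']

data_dir = "/path/to/xnli_data"                          # placeholder
train_examples = processor.get_train_examples(data_dir)  # English training pairs
test_examples = processor.get_test_examples(data_dir)    # only rows whose language column == "de"
print(train_examples[0].text_a, "->", train_examples[0].label)
```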
transformers
transformers-main/src/transformers/pipelines/image_segmentation.py
from typing import Any, Dict, List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import ( MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING, ) logger = logging.get_logger(__name__) Prediction = Dict[str, Any] Predictions = List[Prediction] @add_end_docstrings(PIPELINE_INIT_ARGS) class ImageSegmentationPipeline(Pipeline): """ Image segmentation pipeline using any `AutoModelForXXXSegmentation`. This pipeline predicts masks of objects and their classes. Example: ```python >>> from transformers import pipeline >>> segmenter = pipeline(model="facebook/detr-resnet-50-panoptic") >>> segments = segmenter("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") >>> len(segments) 2 >>> segments[0]["label"] 'bird' >>> segments[1]["label"] 'bird' >>> type(segments[0]["mask"]) # This is a black and white mask showing where is the bird on the original image. <class 'PIL.Image.Image'> >>> segments[0]["mask"].size (768, 512) ``` This image segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"image-segmentation"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=image-segmentation). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch.") requires_backends(self, "vision") self.check_model_type( dict( MODEL_FOR_IMAGE_SEGMENTATION_MAPPING.items() + MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING.items() + MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING.items() + MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING.items() ) ) def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} postprocess_kwargs = {} if "subtask" in kwargs: postprocess_kwargs["subtask"] = kwargs["subtask"] preprocess_kwargs["subtask"] = kwargs["subtask"] if "threshold" in kwargs: postprocess_kwargs["threshold"] = kwargs["threshold"] if "mask_threshold" in kwargs: postprocess_kwargs["mask_threshold"] = kwargs["mask_threshold"] if "overlap_mask_area_threshold" in kwargs: postprocess_kwargs["overlap_mask_area_threshold"] = kwargs["overlap_mask_area_threshold"] return preprocess_kwargs, {}, postprocess_kwargs def __call__(self, images, **kwargs) -> Union[Predictions, List[Prediction]]: """ Perform segmentation (detect masks & classes) in the image(s) passed as inputs. Args: images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing an HTTP(S) link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images. subtask (`str`, *optional*): Segmentation task to be performed, choose [`semantic`, `instance` and `panoptic`] depending on model capabilities. If not set, the pipeline will attempt tp resolve in the following order: `panoptic`, `instance`, `semantic`. threshold (`float`, *optional*, defaults to 0.9): Probability threshold to filter out predicted masks. 
mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5): Mask overlap threshold to eliminate small, disconnected segments. Return: A dictionary or a list of dictionaries containing the result. If the input is a single image, will return a list of dictionaries, if the input is a list of several images, will return a list of list of dictionaries corresponding to each image. The dictionaries contain the mask, label and score (where applicable) of each detected object and contains the following keys: - **label** (`str`) -- The class label identified by the model. - **mask** (`PIL.Image`) -- A binary mask of the detected object as a Pil Image of shape (width, height) of the original image. Returns a mask filled with zeros if no object is found. - **score** (*optional* `float`) -- Optionally, when the model is capable of estimating a confidence of the "object" described by the label and the mask. """ return super().__call__(images, **kwargs) def preprocess(self, image, subtask=None): image = load_image(image) target_size = [(image.height, image.width)] if self.model.config.__class__.__name__ == "OneFormerConfig": if subtask is None: kwargs = {} else: kwargs = {"task_inputs": [subtask]} inputs = self.image_processor(images=[image], return_tensors="pt", **kwargs) inputs["task_inputs"] = self.tokenizer( inputs["task_inputs"], padding="max_length", max_length=self.model.config.task_seq_len, return_tensors=self.framework, )["input_ids"] else: inputs = self.image_processor(images=[image], return_tensors="pt") inputs["target_size"] = target_size return inputs def _forward(self, model_inputs): target_size = model_inputs.pop("target_size") model_outputs = self.model(**model_inputs) model_outputs["target_size"] = target_size return model_outputs def postprocess( self, model_outputs, subtask=None, threshold=0.9, mask_threshold=0.5, overlap_mask_area_threshold=0.5 ): fn = None if subtask in {"panoptic", None} and hasattr(self.image_processor, "post_process_panoptic_segmentation"): fn = self.image_processor.post_process_panoptic_segmentation elif subtask in {"instance", None} and hasattr(self.image_processor, "post_process_instance_segmentation"): fn = self.image_processor.post_process_instance_segmentation if fn is not None: outputs = fn( model_outputs, threshold=threshold, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, target_sizes=model_outputs["target_size"], )[0] annotation = [] segmentation = outputs["segmentation"] for segment in outputs["segments_info"]: mask = (segmentation == segment["id"]) * 255 mask = Image.fromarray(mask.numpy().astype(np.uint8), mode="L") label = self.model.config.id2label[segment["label_id"]] score = segment["score"] annotation.append({"score": score, "label": label, "mask": mask}) elif subtask in {"semantic", None} and hasattr(self.image_processor, "post_process_semantic_segmentation"): outputs = self.image_processor.post_process_semantic_segmentation( model_outputs, target_sizes=model_outputs["target_size"] )[0] annotation = [] segmentation = outputs.numpy() labels = np.unique(segmentation) for label in labels: mask = (segmentation == label) * 255 mask = Image.fromarray(mask.astype(np.uint8), mode="L") label = self.model.config.id2label[label] annotation.append({"score": None, "label": label, "mask": mask}) else: raise ValueError(f"Subtask {subtask} is not supported for model {type(self.model)}") 
return annotation
8,731
40.580952
119
py
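The pipeline docstring in the record above already shows a basic call; the sketch below additionally exercises the `subtask`, `threshold`, `mask_threshold`, and `overlap_mask_area_threshold` knobs it documents. The checkpoint and image URL are taken from that docstring; the parameter values are illustrative only:

```python
from transformers import pipeline

segmenter = pipeline(task="image-segmentation", model="facebook/detr-resnet-50-panoptic")
segments = segmenter(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    subtask="panoptic",               # resolution order when unset: panoptic, instance, semantic
    threshold=0.9,                    # drop masks whose score falls below this
    mask_threshold=0.5,               # binarization threshold for the predicted masks
    overlap_mask_area_threshold=0.5,  # filter small, disconnected segments
)
for segment in segments:
    # Each entry carries a label, an optional score, and a binary PIL mask.
    print(segment["label"], segment["score"], segment["mask"].size)
```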
transformers
transformers-main/src/transformers/pipelines/base.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import csv import importlib import json import os import pickle import sys import types import warnings from abc import ABC, abstractmethod from collections import UserDict from contextlib import contextmanager from os.path import abspath, exists from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from packaging import version from ..dynamic_module_utils import custom_object_save from ..feature_extraction_utils import PreTrainedFeatureExtractor from ..image_processing_utils import BaseImageProcessor from ..modelcard import ModelCard from ..models.auto.configuration_auto import AutoConfig from ..tokenization_utils import PreTrainedTokenizer from ..utils import ModelOutput, add_end_docstrings, infer_framework, is_tf_available, is_torch_available, logging GenericTensor = Union[List["GenericTensor"], "torch.Tensor", "tf.Tensor"] if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TFAutoModel if is_torch_available(): import torch from torch.utils.data import DataLoader, Dataset from ..models.auto.modeling_auto import AutoModel # Re-export for backward compatibility from .pt_utils import KeyDataset else: Dataset = None KeyDataset = None if TYPE_CHECKING: from ..modeling_tf_utils import TFPreTrainedModel from ..modeling_utils import PreTrainedModel logger = logging.get_logger(__name__) def no_collate_fn(items): if len(items) != 1: raise ValueError("This collate_fn is meant to be used with batch_size=1") return items[0] def _pad(items, key, padding_value, padding_side): batch_size = len(items) if isinstance(items[0][key], torch.Tensor): # Others include `attention_mask` etc... 
shape = items[0][key].shape dim = len(shape) if key in ["pixel_values", "image"]: # This is probable image so padding shouldn't be necessary # B, C, H, W return torch.cat([item[key] for item in items], dim=0) elif dim == 4 and key == "input_features": # this is probably a mel spectrogram batched return torch.cat([item[key] for item in items], dim=0) max_length = max(item[key].shape[1] for item in items) min_length = min(item[key].shape[1] for item in items) dtype = items[0][key].dtype if dim == 2: if max_length == min_length: # Bypass for `ImageGPT` which doesn't provide a padding value, yet # we can consistently pad since the size should be matching return torch.cat([item[key] for item in items], dim=0) tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value elif dim == 3: tensor = torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype) + padding_value elif dim == 4: tensor = torch.zeros((batch_size, max_length, shape[-2], shape[-1]), dtype=dtype) + padding_value for i, item in enumerate(items): if dim == 2: if padding_side == "left": tensor[i, -len(item[key][0]) :] = item[key][0].clone() else: tensor[i, : len(item[key][0])] = item[key][0].clone() elif dim == 3: if padding_side == "left": tensor[i, -len(item[key][0]) :, :] = item[key][0].clone() else: tensor[i, : len(item[key][0]), :] = item[key][0].clone() elif dim == 4: if padding_side == "left": tensor[i, -len(item[key][0]) :, :, :] = item[key][0].clone() else: tensor[i, : len(item[key][0]), :, :] = item[key][0].clone() return tensor else: return [item[key] for item in items] def pad_collate_fn(tokenizer, feature_extractor): # Tokenizer t_padding_side = None # Feature extractor f_padding_side = None if tokenizer is None and feature_extractor is None: raise ValueError("Pipeline without tokenizer or feature_extractor cannot do batching") if tokenizer is not None: if tokenizer.pad_token_id is None: raise ValueError( "Pipeline with tokenizer without pad_token cannot do batching. You can try to set it with " "`pipe.tokenizer.pad_token_id = model.config.eos_token_id`." ) else: t_padding_value = tokenizer.pad_token_id t_padding_side = tokenizer.padding_side if feature_extractor is not None: # Feature extractor can be images, where no padding is expected f_padding_value = getattr(feature_extractor, "padding_value", None) f_padding_side = getattr(feature_extractor, "padding_side", None) if t_padding_side is not None and f_padding_side is not None and t_padding_side != f_padding_side: raise ValueError( f"The feature extractor, and tokenizer don't agree on padding side {t_padding_side} != {f_padding_side}" ) padding_side = "right" if t_padding_side is not None: padding_side = t_padding_side if f_padding_side is not None: padding_side = f_padding_side def inner(items): keys = set(items[0].keys()) for item in items: if set(item.keys()) != keys: raise ValueError( f"The elements of the batch contain different keys. Cannot batch them ({set(item.keys())} !=" f" {keys})" ) # input_values, input_pixels, input_ids, ... 
padded = {} for key in keys: if key in {"input_ids"}: # ImageGPT uses a feature extractor if tokenizer is None and feature_extractor is not None: _padding_value = f_padding_value else: _padding_value = t_padding_value elif key in {"input_values", "pixel_values", "input_features"}: _padding_value = f_padding_value elif key in {"p_mask", "special_tokens_mask"}: _padding_value = 1 elif key in {"attention_mask", "token_type_ids"}: _padding_value = 0 else: # This is likely another random key maybe even user provided _padding_value = 0 padded[key] = _pad(items, key, _padding_value, padding_side) return padded return inner def infer_framework_load_model( model, config: AutoConfig, model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, **model_kwargs, ): """ Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to instantiate the model twice, this model is returned for use by the pipeline. If both frameworks are installed and available for `model`, PyTorch is selected. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. config ([`AutoConfig`]): The config associated with the model to help using the correct class model_classes (dictionary `str` to `type`, *optional*): A mapping framework to class. task (`str`): The task defining which pipeline will be returned. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. Returns: `Tuple`: A tuple framework, model. """ if not is_tf_available() and not is_torch_available(): raise RuntimeError( "At least one of TensorFlow 2.0 or PyTorch should be installed. " "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " "To install PyTorch, read the instructions at https://pytorch.org/." ) if isinstance(model, str): model_kwargs["_from_pipeline"] = task class_tuple = () look_pt = is_torch_available() and framework in {"pt", None} look_tf = is_tf_available() and framework in {"tf", None} if model_classes: if look_pt: class_tuple = class_tuple + model_classes.get("pt", (AutoModel,)) if look_tf: class_tuple = class_tuple + model_classes.get("tf", (TFAutoModel,)) if config.architectures: classes = [] for architecture in config.architectures: transformers_module = importlib.import_module("transformers") if look_pt: _class = getattr(transformers_module, architecture, None) if _class is not None: classes.append(_class) if look_tf: _class = getattr(transformers_module, f"TF{architecture}", None) if _class is not None: classes.append(_class) class_tuple = class_tuple + tuple(classes) if len(class_tuple) == 0: raise ValueError(f"Pipeline cannot infer suitable model classes from {model}") for model_class in class_tuple: kwargs = model_kwargs.copy() if framework == "pt" and model.endswith(".h5"): kwargs["from_tf"] = True logger.warning( "Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. " "Trying to load the model with PyTorch." 
) elif framework == "tf" and model.endswith(".bin"): kwargs["from_pt"] = True logger.warning( "Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. " "Trying to load the model with Tensorflow." ) try: model = model_class.from_pretrained(model, **kwargs) if hasattr(model, "eval"): model = model.eval() # Stop loading on the first successful load. break except (OSError, ValueError): continue if isinstance(model, str): raise ValueError(f"Could not load model {model} with any of the following classes: {class_tuple}.") if framework is None: framework = infer_framework(model.__class__) return framework, model def infer_framework_from_model( model, model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, **model_kwargs, ): """ Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to instantiate the model twice, this model is returned for use by the pipeline. If both frameworks are installed and available for `model`, PyTorch is selected. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. model_classes (dictionary `str` to `type`, *optional*): A mapping framework to class. task (`str`): The task defining which pipeline will be returned. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. Returns: `Tuple`: A tuple framework, model. """ if isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **model_kwargs) else: config = model.config return infer_framework_load_model( model, config, model_classes=model_classes, _from_pipeline=task, task=task, framework=framework, **model_kwargs ) def get_framework(model, revision: Optional[str] = None): """ Select framework (TensorFlow or PyTorch) to use. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): If both frameworks are installed, picks the one corresponding to the model passed (either a model class or the model name). If no specific model is provided, defaults to using PyTorch. """ warnings.warn( "`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.", FutureWarning, ) if not is_tf_available() and not is_torch_available(): raise RuntimeError( "At least one of TensorFlow 2.0 or PyTorch should be installed. " "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " "To install PyTorch, read the instructions at https://pytorch.org/." 
) if isinstance(model, str): if is_torch_available() and not is_tf_available(): model = AutoModel.from_pretrained(model, revision=revision) elif is_tf_available() and not is_torch_available(): model = TFAutoModel.from_pretrained(model, revision=revision) else: try: model = AutoModel.from_pretrained(model, revision=revision) except OSError: model = TFAutoModel.from_pretrained(model, revision=revision) framework = infer_framework(model.__class__) return framework def get_default_model_and_revision( targeted_task: Dict, framework: Optional[str], task_options: Optional[Any] ) -> Union[str, Tuple[str, str]]: """ Select a default model to use for a given task. Defaults to pytorch if ambiguous. Args: targeted_task (`Dict` ): Dictionary representing the given task, that should contain default models framework (`str`, None) "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet. task_options (`Any`, None) Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for translation task. Returns `str` The model string representing the default model for this pipeline """ if is_torch_available() and not is_tf_available(): framework = "pt" elif is_tf_available() and not is_torch_available(): framework = "tf" defaults = targeted_task["default"] if task_options: if task_options not in defaults: raise ValueError(f"The task does not provide any default models for options {task_options}") default_models = defaults[task_options]["model"] elif "model" in defaults: default_models = targeted_task["default"]["model"] else: # XXX This error message needs to be updated to be more generic if more tasks are going to become # parametrized raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"') if framework is None: framework = "pt" return default_models[framework] class PipelineException(Exception): """ Raised by a [`Pipeline`] when handling __call__. Args: task (`str`): The task of the pipeline. model (`str`): The model used by the pipeline. reason (`str`): The error message to display. """ def __init__(self, task: str, model: str, reason: str): super().__init__(reason) self.task = task self.model = model class ArgumentHandler(ABC): """ Base interface for handling arguments for each [`~pipelines.Pipeline`]. """ @abstractmethod def __call__(self, *args, **kwargs): raise NotImplementedError() class PipelineDataFormat: """ Base class for all the pipeline supported data format both for reading and writing. Supported data formats currently includes: - JSON - CSV - stdin/stdout (pipe) `PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets columns to pipelines keyword arguments through the `dataset_kwarg_1=dataset_column_1` format. Args: output_path (`str`, *optional*): Where to save the outgoing data. input_path (`str`, *optional*): Where to look for the input data. column (`str`, *optional*): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. 
""" SUPPORTED_FORMATS = ["json", "csv", "pipe"] def __init__( self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite: bool = False, ): self.output_path = output_path self.input_path = input_path self.column = column.split(",") if column is not None else [""] self.is_multi_columns = len(self.column) > 1 if self.is_multi_columns: self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column] if output_path is not None and not overwrite: if exists(abspath(self.output_path)): raise OSError(f"{self.output_path} already exists on disk") if input_path is not None: if not exists(abspath(self.input_path)): raise OSError(f"{self.input_path} doesnt exist on disk") @abstractmethod def __iter__(self): raise NotImplementedError() @abstractmethod def save(self, data: Union[dict, List[dict]]): """ Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`]. Args: data (`dict` or list of `dict`): The data to store. """ raise NotImplementedError() def save_binary(self, data: Union[dict, List[dict]]) -> str: """ Save the provided data object as a pickle-formatted binary data on the disk. Args: data (`dict` or list of `dict`): The data to store. Returns: `str`: Path where the data has been saved. """ path, _ = os.path.splitext(self.output_path) binary_path = os.path.extsep.join((path, "pickle")) with open(binary_path, "wb+") as f_output: pickle.dump(data, f_output) return binary_path @staticmethod def from_str( format: str, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False, ) -> "PipelineDataFormat": """ Creates an instance of the right subclass of [`~pipelines.PipelineDataFormat`] depending on `format`. Args: format (`str`): The format of the desired pipeline. Acceptable values are `"json"`, `"csv"` or `"pipe"`. output_path (`str`, *optional*): Where to save the outgoing data. input_path (`str`, *optional*): Where to look for the input data. column (`str`, *optional*): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. Returns: [`~pipelines.PipelineDataFormat`]: The proper data format. """ if format == "json": return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) elif format == "csv": return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) elif format == "pipe": return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) else: raise KeyError(f"Unknown reader {format} (Available reader are json/csv/pipe)") class CsvPipelineDataFormat(PipelineDataFormat): """ Support for pipelines using CSV data format. Args: output_path (`str`, *optional*): Where to save the outgoing data. input_path (`str`, *optional*): Where to look for the input data. column (`str`, *optional*): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. """ def __init__( self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False, ): super().__init__(output_path, input_path, column, overwrite=overwrite) def __iter__(self): with open(self.input_path, "r") as f: reader = csv.DictReader(f) for row in reader: if self.is_multi_columns: yield {k: row[c] for k, c in self.column} else: yield row[self.column[0]] def save(self, data: List[dict]): """ Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`]. 
Args: data (`List[dict]`): The data to store. """ with open(self.output_path, "w") as f: if len(data) > 0: writer = csv.DictWriter(f, list(data[0].keys())) writer.writeheader() writer.writerows(data) class JsonPipelineDataFormat(PipelineDataFormat): """ Support for pipelines using JSON file format. Args: output_path (`str`, *optional*): Where to save the outgoing data. input_path (`str`, *optional*): Where to look for the input data. column (`str`, *optional*): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. """ def __init__( self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False, ): super().__init__(output_path, input_path, column, overwrite=overwrite) with open(input_path, "r") as f: self._entries = json.load(f) def __iter__(self): for entry in self._entries: if self.is_multi_columns: yield {k: entry[c] for k, c in self.column} else: yield entry[self.column[0]] def save(self, data: dict): """ Save the provided data object in a json file. Args: data (`dict`): The data to store. """ with open(self.output_path, "w") as f: json.dump(data, f) class PipedPipelineDataFormat(PipelineDataFormat): """ Read data from piped input to the python process. For multi columns data, columns should separated by \t If columns are provided, then the output will be a dictionary with {column_x: value_x} Args: output_path (`str`, *optional*): Where to save the outgoing data. input_path (`str`, *optional*): Where to look for the input data. column (`str`, *optional*): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. """ def __iter__(self): for line in sys.stdin: # Split for multi-columns if "\t" in line: line = line.split("\t") if self.column: # Dictionary to map arguments yield {kwargs: l for (kwargs, _), l in zip(self.column, line)} else: yield tuple(line) # No dictionary to map arguments else: yield line def save(self, data: dict): """ Print the data. Args: data (`dict`): The data to store. """ print(data) def save_binary(self, data: Union[dict, List[dict]]) -> str: if self.output_path is None: raise KeyError( "When using piped input on pipeline outputting large object requires an output file path. " "Please provide such output path through --output argument." ) return super().save_binary(data) class _ScikitCompat(ABC): """ Interface layer for the Scikit and Keras compatibility. """ @abstractmethod def transform(self, X): raise NotImplementedError() @abstractmethod def predict(self, X): raise NotImplementedError() PIPELINE_INIT_ARGS = r""" Arguments: model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow. tokenizer ([`PreTrainedTokenizer`]): The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from [`PreTrainedTokenizer`]. modelcard (`str` or [`ModelCard`], *optional*): Model card attributed to the model for this pipeline. framework (`str`, *optional*): The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is provided. 
task (`str`, defaults to `""`): A task-identifier for the pipeline. num_workers (`int`, *optional*, defaults to 8): When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used. batch_size (`int`, *optional*, defaults to 1): When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read [Batching with pipelines](https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching) . args_parser ([`~pipelines.ArgumentHandler`], *optional*): Reference to the object in charge of parsing supplied pipeline parameters. device (`int`, *optional*, defaults to -1): Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native `torch.device` or a `str` too. binary_output (`bool`, *optional*, defaults to `False`): Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text. """ if is_torch_available(): from transformers.pipelines.pt_utils import ( PipelineChunkIterator, PipelineDataset, PipelineIterator, PipelinePackIterator, ) @add_end_docstrings(PIPELINE_INIT_ARGS) class Pipeline(_ScikitCompat): """ The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across different pipelines. Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following operations: Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output Pipeline supports running on CPU or GPU through the device argument (see below). Some pipeline, like for instance [`FeatureExtractionPipeline`] (`'feature-extraction'`) output large tensor object as nested-lists. In order to avoid dumping such large structure as textual data we provide the `binary_output` constructor argument. If set to `True`, the output will be stored in the pickle format. """ default_input_names = None def __init__( self, model: Union["PreTrainedModel", "TFPreTrainedModel"], tokenizer: Optional[PreTrainedTokenizer] = None, feature_extractor: Optional[PreTrainedFeatureExtractor] = None, image_processor: Optional[BaseImageProcessor] = None, modelcard: Optional[ModelCard] = None, framework: Optional[str] = None, task: str = "", args_parser: ArgumentHandler = None, device: Union[int, "torch.device"] = None, torch_dtype: Optional[Union[str, "torch.dtype"]] = None, binary_output: bool = False, **kwargs, ): if framework is None: framework, model = infer_framework_load_model(model, config=model.config) self.task = task self.model = model self.tokenizer = tokenizer self.feature_extractor = feature_extractor self.image_processor = image_processor self.modelcard = modelcard self.framework = framework if self.framework == "pt" and device is not None and not (isinstance(device, int) and device < 0): self.model.to(device) if device is None: # `accelerate` device map hf_device_map = getattr(self.model, "hf_device_map", None) if hf_device_map is not None: # Take the first device used by `accelerate`. 
device = next(iter(hf_device_map.values())) else: device = -1 if is_torch_available() and self.framework == "pt": if isinstance(device, torch.device): self.device = device elif isinstance(device, str): self.device = torch.device(device) elif device < 0: self.device = torch.device("cpu") else: self.device = torch.device(f"cuda:{device}") else: self.device = device if device is not None else -1 self.torch_dtype = torch_dtype self.binary_output = binary_output # Update config and generation_config with task specific parameters task_specific_params = self.model.config.task_specific_params if task_specific_params is not None and task in task_specific_params: self.model.config.update(task_specific_params.get(task)) if self.model.can_generate(): self.model.generation_config.update(**task_specific_params.get(task)) self.call_count = 0 self._batch_size = kwargs.pop("batch_size", None) self._num_workers = kwargs.pop("num_workers", None) self._preprocess_params, self._forward_params, self._postprocess_params = self._sanitize_parameters(**kwargs) if self.image_processor is None and self.feature_extractor is not None: if isinstance(self.feature_extractor, BaseImageProcessor): # Backward compatible change, if users called # ImageSegmentationPipeline(.., feature_extractor=MyFeatureExtractor()) # then we should keep working self.image_processor = self.feature_extractor def save_pretrained(self, save_directory: str, safe_serialization: bool = False): """ Save the pipeline's model and tokenizer. Args: save_directory (`str`): A path to the directory where to saved. It will be created if it doesn't exist. safe_serialization (`str`): Whether to save the model using `safetensors` or the traditional way for PyTorch or Tensorflow """ if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) if hasattr(self, "_registered_impl"): # Add info to the config pipeline_info = self._registered_impl.copy() custom_pipelines = {} for task, info in pipeline_info.items(): if info["impl"] != self.__class__: continue info = info.copy() module_name = info["impl"].__module__ last_module = module_name.split(".")[-1] # Change classes into their names/full names info["impl"] = f"{last_module}.{info['impl'].__name__}" info["pt"] = tuple(c.__name__ for c in info["pt"]) info["tf"] = tuple(c.__name__ for c in info["tf"]) custom_pipelines[task] = info self.model.config.custom_pipelines = custom_pipelines # Save the pipeline custom code custom_object_save(self, save_directory) self.model.save_pretrained(save_directory, safe_serialization=safe_serialization) if self.tokenizer is not None: self.tokenizer.save_pretrained(save_directory) if self.feature_extractor is not None: self.feature_extractor.save_pretrained(save_directory) if self.modelcard is not None: self.modelcard.save_pretrained(save_directory) def transform(self, X): """ Scikit / Keras interface to transformers' pipelines. This method will forward to __call__(). """ return self(X) def predict(self, X): """ Scikit / Keras interface to transformers' pipelines. This method will forward to __call__(). """ return self(X) @contextmanager def device_placement(self): """ Context Manager allowing tensor allocation on the user-specified device in framework agnostic way. 
Returns: Context manager Examples: ```python # Explicitly ask for tensor allocation on CUDA device :0 pipe = pipeline(..., device=0) with pipe.device_placement(): # Every framework specific tensor allocation will be done on the request device output = pipe(...) ```""" if self.framework == "tf": with tf.device("/CPU:0" if self.device == -1 else f"/device:GPU:{self.device}"): yield else: if self.device.type == "cuda": with torch.cuda.device(self.device): yield else: yield def ensure_tensor_on_device(self, **inputs): """ Ensure PyTorch tensors are on the specified device. Args: inputs (keyword arguments that should be `torch.Tensor`, the rest is ignored): The tensors to place on `self.device`. Recursive on lists **only**. Return: `Dict[str, torch.Tensor]`: The same as `inputs` but on the proper device. """ return self._ensure_tensor_on_device(inputs, self.device) def _ensure_tensor_on_device(self, inputs, device): if isinstance(inputs, ModelOutput): return ModelOutput( {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()} ) elif isinstance(inputs, dict): return {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()} elif isinstance(inputs, UserDict): return UserDict({name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}) elif isinstance(inputs, list): return [self._ensure_tensor_on_device(item, device) for item in inputs] elif isinstance(inputs, tuple): return tuple([self._ensure_tensor_on_device(item, device) for item in inputs]) elif isinstance(inputs, torch.Tensor): if device == torch.device("cpu") and inputs.dtype in {torch.float16, torch.bfloat16}: inputs = inputs.float() return inputs.to(device) else: return inputs def check_model_type(self, supported_models: Union[List[str], dict]): """ Check if the model class is in supported by the pipeline. Args: supported_models (`List[str]` or `dict`): The list of models supported by the pipeline, or a dictionary with model class values. """ if not isinstance(supported_models, list): # Create from a model mapping supported_models_names = [] for config, model in supported_models.items(): # Mapping can now contain tuples of models for the same configuration. if isinstance(model, tuple): supported_models_names.extend([_model.__name__ for _model in model]) else: supported_models_names.append(model.__name__) supported_models = supported_models_names if self.model.__class__.__name__ not in supported_models: logger.error( f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are" f" {supported_models}." ) @abstractmethod def _sanitize_parameters(self, **pipeline_parameters): """ _sanitize_parameters will be called with any excessive named arguments from either `__init__` or `__call__` methods. It should return 3 dictionnaries of the resolved parameters used by the various `preprocess`, `forward` and `postprocess` methods. Do not fill dictionnaries if the caller didn't specify a kwargs. This let's you keep defaults in function signatures, which is more "natural". It is not meant to be called directly, it will be automatically called and the final parameters resolved by `__init__` and `__call__` """ raise NotImplementedError("_sanitize_parameters not implemented") @abstractmethod def preprocess(self, input_: Any, **preprocess_parameters: Dict) -> Dict[str, GenericTensor]: """ Preprocess will take the `input_` of a specific pipeline and return a dictionary of everything necessary for `_forward` to run properly. 
It should contain at least one tensor, but might have arbitrary other items. """ raise NotImplementedError("preprocess not implemented") @abstractmethod def _forward(self, input_tensors: Dict[str, GenericTensor], **forward_parameters: Dict) -> ModelOutput: """ _forward will receive the prepared dictionary from `preprocess` and run it on the model. This method might involve the GPU or the CPU and should be agnostic to it. Isolating this function is the reason for `preprocess` and `postprocess` to exist, so that the hot path, this method generally can run as fast as possible. It is not meant to be called directly, `forward` is preferred. It is basically the same but contains additional code surrounding `_forward` making sure tensors and models are on the same device, disabling the training part of the code (leading to faster inference). """ raise NotImplementedError("_forward not implemented") @abstractmethod def postprocess(self, model_outputs: ModelOutput, **postprocess_parameters: Dict) -> Any: """ Postprocess will receive the raw outputs of the `_forward` method, generally tensors, and reformat them into something more friendly. Generally it will output a list or a dict or results (containing just strings and numbers). """ raise NotImplementedError("postprocess not implemented") def get_inference_context(self): inference_context = ( torch.inference_mode if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.9.0") else torch.no_grad ) return inference_context def forward(self, model_inputs, **forward_params): with self.device_placement(): if self.framework == "tf": model_inputs["training"] = False model_outputs = self._forward(model_inputs, **forward_params) elif self.framework == "pt": inference_context = self.get_inference_context() with inference_context(): model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device) model_outputs = self._forward(model_inputs, **forward_params) model_outputs = self._ensure_tensor_on_device(model_outputs, device=torch.device("cpu")) else: raise ValueError(f"Framework {self.framework} is not supported") return model_outputs def get_iterator( self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params ): if isinstance(inputs, collections.abc.Sized): dataset = PipelineDataset(inputs, self.preprocess, preprocess_params) else: if num_workers > 1: logger.warning( "For iterable dataset using num_workers>1 is likely to result" " in errors since everything is iterable, setting `num_workers=1`" " to guarantee correctness." 
) num_workers = 1 dataset = PipelineIterator(inputs, self.preprocess, preprocess_params) if "TOKENIZERS_PARALLELISM" not in os.environ: logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already") os.environ["TOKENIZERS_PARALLELISM"] = "false" # TODO hack by collating feature_extractor and image_processor feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor) dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn) model_iterator = PipelineIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size) final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params) return final_iterator def __call__(self, inputs, *args, num_workers=None, batch_size=None, **kwargs): if args: logger.warning(f"Ignoring args : {args}") if num_workers is None: if self._num_workers is None: num_workers = 0 else: num_workers = self._num_workers if batch_size is None: if self._batch_size is None: batch_size = 1 else: batch_size = self._batch_size preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(**kwargs) # Fuse __init__ params and __call__ params without modifying the __init__ ones. preprocess_params = {**self._preprocess_params, **preprocess_params} forward_params = {**self._forward_params, **forward_params} postprocess_params = {**self._postprocess_params, **postprocess_params} self.call_count += 1 if self.call_count > 10 and self.framework == "pt" and self.device.type == "cuda": warnings.warn( "You seem to be using the pipelines sequentially on GPU. In order to maximize efficiency please use a" " dataset", UserWarning, ) is_dataset = Dataset is not None and isinstance(inputs, Dataset) is_generator = isinstance(inputs, types.GeneratorType) is_list = isinstance(inputs, list) is_iterable = is_dataset or is_generator or is_list # TODO make the get_iterator work also for `tf` (and `flax`). 
can_use_iterator = self.framework == "pt" and (is_dataset or is_generator or is_list) if is_list: if can_use_iterator: final_iterator = self.get_iterator( inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params ) outputs = list(final_iterator) return outputs else: return self.run_multi(inputs, preprocess_params, forward_params, postprocess_params) elif can_use_iterator: return self.get_iterator( inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params ) elif is_iterable: return self.iterate(inputs, preprocess_params, forward_params, postprocess_params) elif self.framework == "pt" and isinstance(self, ChunkPipeline): return next( iter( self.get_iterator( [inputs], num_workers, batch_size, preprocess_params, forward_params, postprocess_params ) ) ) else: return self.run_single(inputs, preprocess_params, forward_params, postprocess_params) def run_multi(self, inputs, preprocess_params, forward_params, postprocess_params): return [self.run_single(item, preprocess_params, forward_params, postprocess_params) for item in inputs] def run_single(self, inputs, preprocess_params, forward_params, postprocess_params): model_inputs = self.preprocess(inputs, **preprocess_params) model_outputs = self.forward(model_inputs, **forward_params) outputs = self.postprocess(model_outputs, **postprocess_params) return outputs def iterate(self, inputs, preprocess_params, forward_params, postprocess_params): # This function should become `get_iterator` again, this is a temporary # easy solution. for input_ in inputs: yield self.run_single(input_, preprocess_params, forward_params, postprocess_params) class ChunkPipeline(Pipeline): def run_single(self, inputs, preprocess_params, forward_params, postprocess_params): all_outputs = [] for model_inputs in self.preprocess(inputs, **preprocess_params): model_outputs = self.forward(model_inputs, **forward_params) all_outputs.append(model_outputs) outputs = self.postprocess(all_outputs, **postprocess_params) return outputs def get_iterator( self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params ): if "TOKENIZERS_PARALLELISM" not in os.environ: logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already") os.environ["TOKENIZERS_PARALLELISM"] = "false" if num_workers > 1: logger.warning( "For ChunkPipeline using num_workers>0 is likely to result in errors since everything is iterable," " setting `num_workers=1` to guarantee correctness." 
) num_workers = 1 dataset = PipelineChunkIterator(inputs, self.preprocess, preprocess_params) # TODO hack by collating feature_extractor and image_processor feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor) dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn) model_iterator = PipelinePackIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size) final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params) return final_iterator class PipelineRegistry: def __init__(self, supported_tasks: Dict[str, Any], task_aliases: Dict[str, str]) -> None: self.supported_tasks = supported_tasks self.task_aliases = task_aliases def get_supported_tasks(self) -> List[str]: supported_task = list(self.supported_tasks.keys()) + list(self.task_aliases.keys()) supported_task.sort() return supported_task def check_task(self, task: str) -> Tuple[str, Dict, Any]: if task in self.task_aliases: task = self.task_aliases[task] if task in self.supported_tasks: targeted_task = self.supported_tasks[task] return task, targeted_task, None if task.startswith("translation"): tokens = task.split("_") if len(tokens) == 4 and tokens[0] == "translation" and tokens[2] == "to": targeted_task = self.supported_tasks["translation"] task = "translation" return task, targeted_task, (tokens[1], tokens[3]) raise KeyError(f"Invalid translation task {task}, use 'translation_XX_to_YY' format") raise KeyError( f"Unknown task {task}, available tasks are {self.get_supported_tasks() + ['translation_XX_to_YY']}" ) def register_pipeline( self, task: str, pipeline_class: type, pt_model: Optional[Union[type, Tuple[type]]] = None, tf_model: Optional[Union[type, Tuple[type]]] = None, default: Optional[Dict] = None, type: Optional[str] = None, ) -> None: if task in self.supported_tasks: logger.warning(f"{task} is already registered. Overwriting pipeline for task {task}...") if pt_model is None: pt_model = () elif not isinstance(pt_model, tuple): pt_model = (pt_model,) if tf_model is None: tf_model = () elif not isinstance(tf_model, tuple): tf_model = (tf_model,) task_impl = {"impl": pipeline_class, "pt": pt_model, "tf": tf_model} if default is not None: if "model" not in default and ("pt" in default or "tf" in default): default = {"model": default} task_impl["default"] = default if type is not None: task_impl["type"] = type self.supported_tasks[task] = task_impl pipeline_class._registered_impl = {task: task_impl} def to_dict(self): return self.supported_tasks
51,438
40.550081
119
py
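The `Pipeline.__call__` logic in the record above warns against looping over single GPU calls and instead routes datasets, generators, and lists through `get_iterator`, which wraps a `DataLoader` with `pad_collate_fn`. A hedged sketch of that batched path; the dataset name and `device=0` are placeholders, and loading `imdb` assumes the separate `datasets` package is installed:

```python
from datasets import load_dataset                        # assumption: `datasets` is installed
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset   # re-exported by the module above

dataset = load_dataset("imdb", split="test")             # placeholder dataset with a "text" column
classifier = pipeline("text-classification", device=0)   # placeholder device; any PyTorch pipeline works

# Passing a Dataset (rather than calling the pipeline in a Python loop) lets the
# base class build a DataLoader with pad_collate_fn and stream results lazily.
for prediction in classifier(KeyDataset(dataset, "text"), batch_size=8, num_workers=2):
    print(prediction)
```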
transformers
transformers-main/src/transformers/pipelines/zero_shot_object_detection.py
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING logger = logging.get_logger(__name__) @add_end_docstrings(PIPELINE_INIT_ARGS) class ZeroShotObjectDetectionPipeline(ChunkPipeline): """ Zero shot object detection pipeline using `OwlViTForObjectDetection`. This pipeline predicts bounding boxes of objects when you provide an image and a set of `candidate_labels`. Example: ```python >>> from transformers import pipeline >>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection") >>> detector( ... "http://images.cocodataset.org/val2017/000000039769.jpg", ... candidate_labels=["cat", "couch"], ... ) [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}] >>> detector( ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", ... candidate_labels=["head", "bird"], ... ) [{'score': 0.119, 'label': 'bird', 'box': {'xmin': 71, 'ymin': 170, 'xmax': 410, 'ymax': 508}}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"zero-shot-object-detection"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-object-detection). """ def __init__(self, **kwargs): super().__init__(**kwargs) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch.") requires_backends(self, "vision") self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING) def __call__( self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs, ): """ Detect objects (bounding boxes & classes) in the image(s) passed as inputs. Args: image (`str`, `PIL.Image` or `List[Dict[str, Any]]`): The pipeline handles three types of images: - A string containing an http url pointing to an image - A string containing a local path to an image - An image loaded in PIL directly You can use this parameter to send directly a list of images, or a dataset or a generator like so: ```python >>> from transformers import pipeline >>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection") >>> detector( ... [ ... { ... "image": "http://images.cocodataset.org/val2017/000000039769.jpg", ... "candidate_labels": ["cat", "couch"], ... }, ... { ... "image": "http://images.cocodataset.org/val2017/000000039769.jpg", ... "candidate_labels": ["cat", "couch"], ... }, ... ] ... 
) [[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.25, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}], [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]] ``` candidate_labels (`str` or `List[str]` or `List[List[str]]`): What the model should recognize in the image. threshold (`float`, *optional*, defaults to 0.1): The probability necessary to make a prediction. top_k (`int`, *optional*, defaults to None): The number of top predictions that will be returned by the pipeline. If the provided number is `None` or higher than the number of predictions available, it will default to the number of predictions. Return: A list of lists containing prediction results, one list per input image. Each list contains dictionaries with the following keys: - **label** (`str`) -- Text query corresponding to the found object. - **score** (`float`) -- Score corresponding to the object (between 0 and 1). - **box** (`Dict[str,int]`) -- Bounding box of the detected object in image's original size. It is a dictionary with `x_min`, `x_max`, `y_min`, `y_max` keys. """ if "text_queries" in kwargs: candidate_labels = kwargs.pop("text_queries") if isinstance(image, (str, Image.Image)): inputs = {"image": image, "candidate_labels": candidate_labels} else: inputs = image results = super().__call__(inputs, **kwargs) return results def _sanitize_parameters(self, **kwargs): postprocess_params = {} if "threshold" in kwargs: postprocess_params["threshold"] = kwargs["threshold"] if "top_k" in kwargs: postprocess_params["top_k"] = kwargs["top_k"] return {}, {}, postprocess_params def preprocess(self, inputs): image = load_image(inputs["image"]) candidate_labels = inputs["candidate_labels"] if isinstance(candidate_labels, str): candidate_labels = candidate_labels.split(",") target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32) for i, candidate_label in enumerate(candidate_labels): text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework) image_features = self.image_processor(image, return_tensors=self.framework) yield { "is_last": i == len(candidate_labels) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def _forward(self, model_inputs): target_size = model_inputs.pop("target_size") candidate_label = model_inputs.pop("candidate_label") is_last = model_inputs.pop("is_last") outputs = self.model(**model_inputs) model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def postprocess(self, model_outputs, threshold=0.1, top_k=None): results = [] for model_output in model_outputs: label = model_output["candidate_label"] model_output = BaseModelOutput(model_output) outputs = self.image_processor.post_process_object_detection( outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"] )[0] for index in outputs["scores"].nonzero(): score = outputs["scores"][index].item() box = self._get_bounding_box(outputs["boxes"][index][0]) result = {"score": score, "label": label, "box": box} results.append(result) results = sorted(results, key=lambda x: 
x["score"], reverse=True) if top_k: results = results[:top_k] return results def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]: """ Turns list [xmin, xmax, ymin, ymax] into dict { "xmin": xmin, ... } Args: box (`torch.Tensor`): Tensor containing the coordinates in corners format. Returns: bbox (`Dict[str, int]`): Dict containing the coordinates in corners format. """ if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.") xmin, ymin, xmax, ymax = box.int().tolist() bbox = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
9,029
41.59434
577
py
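A minimal usage sketch for the zero-shot object detection pipeline shown above. The checkpoint name and image URL are assumptions (any checkpoint exposed under the `zero-shot-object-detection` task, such as an OWL-ViT model, should behave the same); the call signature follows the `__call__` documentation above.

```python
from transformers import pipeline

# Hypothetical checkpoint choice; any zero-shot object detection checkpoint should work.
detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")

predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",  # example image URL
    candidate_labels=["cat", "couch", "remote control"],
    threshold=0.1,  # minimum score to keep a detection
    top_k=5,        # keep at most 5 boxes
)
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])
```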
transformers
transformers-main/src/transformers/pipelines/image_to_text.py
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING logger = logging.get_logger(__name__) @add_end_docstrings(PIPELINE_INIT_ARGS) class ImageToTextPipeline(Pipeline): """ Image To Text pipeline using an `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image. Example: ```python >>> from transformers import pipeline >>> captioner = pipeline(model="ydshieh/vit-gpt2-coco-en") >>> captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") [{'generated_text': 'two birds are standing next to each other '}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This image to text pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"image-to-text"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?pipeline_tag=image-to-text). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "vision") self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None): forward_kwargs = {} preprocess_params = {} if prompt is not None: preprocess_params["prompt"] = prompt if generate_kwargs is not None: forward_kwargs["generate_kwargs"] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: forward_kwargs["generate_kwargs"] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter," " please use only one" ) forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs): """ Generate a caption for the image(s) passed as inputs. Args: images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing an HTTP(s) link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. max_new_tokens (`int`, *optional*): The maximum number of tokens to generate. By default, it will use the `generate` default. generate_kwargs (`Dict`, *optional*): Pass these arguments directly to `generate`, allowing full control of this function. Return: A list or a list of list of `dict`: Each result comes as a dictionary with the following key: - **generated_text** (`str`) -- The generated text. """ return super().__call__(images, **kwargs) def preprocess(self, image, prompt=None): image = load_image(image) if prompt is not None: if not isinstance(prompt, str): raise ValueError( f"Received an invalid text input, got - {type(prompt)} - but expected a single string. " "Note also that one single text can be provided for conditional image to text generation."
) model_type = self.model.config.model_type if model_type == "git": model_inputs = self.image_processor(images=image, return_tensors=self.framework) input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids input_ids = [self.tokenizer.cls_token_id] + input_ids input_ids = torch.tensor(input_ids).unsqueeze(0) model_inputs.update({"input_ids": input_ids}) elif model_type == "pix2struct": model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation model_inputs = self.image_processor(images=image, return_tensors=self.framework) text_inputs = self.tokenizer(prompt, return_tensors=self.framework) model_inputs.update(text_inputs) else: raise ValueError(f"Model type {model_type} does not support conditional text generation") else: model_inputs = self.image_processor(images=image, return_tensors=self.framework) if self.model.config.model_type == "git" and prompt is None: model_inputs["input_ids"] = None return model_inputs def _forward(self, model_inputs, generate_kwargs=None): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["input_ids"], list) and all(x is None for x in model_inputs["input_ids"]) ): model_inputs["input_ids"] = None if generate_kwargs is None: generate_kwargs = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. inputs = model_inputs.pop(self.model.main_input_name) model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs) return model_outputs def postprocess(self, model_outputs): records = [] for output_ids in model_outputs: record = { "generated_text": self.tokenizer.decode( output_ids, skip_special_tokens=True, ) } records.append(record) return records
7,138
39.106742
117
py
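To complement the image captioning pipeline above, a small usage sketch follows. The checkpoint is the one already used in the class docstring example; `max_new_tokens` is forwarded to `generate` as described in `_sanitize_parameters`.

```python
from transformers import pipeline

# Checkpoint taken from the class docstring example above.
captioner = pipeline(task="image-to-text", model="ydshieh/vit-gpt2-coco-en")

# Caption a single image; max_new_tokens is forwarded to `generate`.
result = captioner(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    max_new_tokens=20,
)
print(result)  # e.g. [{'generated_text': 'two birds are standing next to each other '}]
```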
transformers
transformers-main/src/transformers/pipelines/automatic_speech_recognition.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict from typing import TYPE_CHECKING, Dict, Optional, Union import numpy as np import requests from ..utils import is_torch_available, is_torchaudio_available, logging from .audio_utils import ffmpeg_read from .base import ChunkPipeline if TYPE_CHECKING: from pyctcdecode import BeamSearchDecoderCTC from ..feature_extraction_sequence_utils import SequenceFeatureExtractor logger = logging.get_logger(__name__) if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_CTC_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING def rescale_stride(stride, ratio): """ Rescales the stride values from audio space to tokens/logits space. (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance. """ # Shape is [B, SEQ] for tokens # [B, SEQ, V] for logits new_strides = [] for input_n, left, right in stride: token_n = int(round(input_n * ratio)) left = int(round(left / input_n * token_n)) right = int(round(right / input_n * token_n)) new_stride = (token_n, left, right) new_strides.append(new_stride) return new_strides def chunk_iter(inputs, feature_extractor, chunk_len, stride_left, stride_right, rescale=True, dtype=None): inputs_len = inputs.shape[0] step = chunk_len - stride_left - stride_right for chunk_start_idx in range(0, inputs_len, step): chunk_end_idx = chunk_start_idx + chunk_len chunk = inputs[chunk_start_idx:chunk_end_idx] processed = feature_extractor(chunk, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt") if dtype is not None: processed = processed.to(dtype=dtype) _stride_left = 0 if chunk_start_idx == 0 else stride_left # all right strides must be full, otherwise it is the last item is_last = chunk_end_idx > inputs_len if stride_right > 0 else chunk_end_idx >= inputs_len _stride_right = 0 if is_last else stride_right chunk_len = chunk.shape[0] stride = (chunk_len, _stride_left, _stride_right) if "input_features" in processed: processed_len = processed["input_features"].shape[-1] elif "input_values" in processed: processed_len = processed["input_values"].shape[-1] if processed_len != chunk.shape[-1] and rescale: ratio = processed_len / chunk_len stride = rescale_stride([stride], ratio)[0] if chunk.shape[0] > _stride_left: yield {"is_last": is_last, "stride": stride, **processed} if is_last: break def _fast_find_longest_common_sequence(sequence_left, sequence_right): seq_len_left = len(sequence_left) seq_len_right = len(sequence_right) counter = [[0] * (seq_len_right + 1) for _ in range(seq_len_left + 1)] longest = 0 for i in range(seq_len_left): for j in range(seq_len_right): if sequence_left[i] == sequence_right[j]: previous_counter = counter[i][j] + 1 counter[i + 1][j + 1] = previous_counter if previous_counter > longest: longest = previous_counter counter = np.array(counter) # we return the idx of the first element of the longest common sequence in the left sequence index_left = np.argwhere(counter == longest)[-1][0] - longest if longest 
!= 0 else -1 index_right = np.argwhere(counter == longest)[-1][1] - longest if longest != 0 else -1 return index_left, index_right, longest def _find_longest_common_sequence(sequences, tokenizer): # TODO Use a faster algorithm this can probably be done in O(n) # using suffix array. # It might be tedious to do because of fault tolerance. # We actually have a really good property which is that the total sequence # MUST be those subsequences in order. # Also the algorithm should be more tolerant to errors. sequence = [tok_id for tok_id in sequences[0][0].tolist() if tok_id not in tokenizer.all_special_ids] for new_seq in sequences[1:]: new_sequence = [tok_id for tok_id in new_seq[0].tolist() if tok_id not in tokenizer.all_special_ids] index = 0 max_ = 0.0 for i in range(1, len(new_sequence) + 1): # epsilon to favor long perfect matches eps = i / 10000.0 matches = np.sum(np.array(sequence[-i:]) == np.array(new_sequence[:i])) matching = matches / i + eps if matches > 1 and matching > max_: index = i max_ = matching sequence.extend(new_sequence[index:]) return np.array(sequence) class AutomaticSpeechRecognitionPipeline(ChunkPipeline): """ Pipeline that aims at extracting spoken text contained within some audio. The input can be either a raw waveform or an audio file. In the case of an audio file, ffmpeg should be installed to support multiple audio formats. Example: ```python >>> from transformers import pipeline >>> transcriber = pipeline(model="openai/whisper-base") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac") {'text': ' He hoped there would be stew for dinner, turnips and carrots and bruised potatoes and fat mutton pieces to be ladled out in thick, peppered flour-fatten sauce.'} ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) Arguments: model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow. tokenizer ([`PreTrainedTokenizer`]): The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from [`PreTrainedTokenizer`]. feature_extractor ([`SequenceFeatureExtractor`]): The feature extractor that will be used by the pipeline to encode waveform for the model. chunk_length_s (`float`, *optional*, defaults to 0): The input length of each chunk. If `chunk_length_s = 0` then chunking is disabled (default). Only available for CTC models, e.g. [`Wav2Vec2ForCTC`]. <Tip> For more information on how to effectively use `chunk_length_s`, please have a look at the [ASR chunking blog post](https://huggingface.co/blog/asr-chunking). </Tip> stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`): The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables the model to *see* more context and infer letters better than without this context, but the pipeline discards the stride bits at the end to make the final reconstitution as perfect as possible. <Tip> For more information on how to effectively use `stride_length_s`, please have a look at the [ASR chunking blog post](https://huggingface.co/blog/asr-chunking). </Tip> framework (`str`, *optional*): The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed.
If no framework is specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is provided. device (Union[`int`, `torch.device`], *optional*): Device ordinal for CPU/GPU supports. Setting this to `None` will leverage CPU, a positive will run the model on the associated CUDA device id. decoder (`pyctcdecode.BeamSearchDecoderCTC`, *optional*): [PyCTCDecode's BeamSearchDecoderCTC](https://github.com/kensho-technologies/pyctcdecode/blob/2fd33dc37c4111417e08d89ccd23d28e9b308d19/pyctcdecode/decoder.py#L180) can be passed for language model boosted decoding. See [`Wav2Vec2ProcessorWithLM`] for more information. """ def __init__( self, feature_extractor: Union["SequenceFeatureExtractor", str], *, decoder: Optional[Union["BeamSearchDecoderCTC", str]] = None, **kwargs, ): super().__init__(**kwargs) self.feature_extractor = feature_extractor if self.model.config.model_type == "whisper": self.type = "seq2seq_whisper" elif self.model.__class__ in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.values(): self.type = "seq2seq" elif ( feature_extractor._processor_class and feature_extractor._processor_class.endswith("WithLM") and decoder is not None ): self.decoder = decoder self.type = "ctc_with_lm" else: self.type = "ctc" if self.framework == "tf": raise ValueError("The AutomaticSpeechRecognitionPipeline is only available in PyTorch.") self.check_model_type(dict(MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.items() + MODEL_FOR_CTC_MAPPING.items())) def __call__( self, inputs: Union[np.ndarray, bytes, str], **kwargs, ): """ Transcribe the audio sequence(s) given as inputs to text. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more information. Args: inputs (`np.ndarray` or `bytes` or `str` or `dict`): The inputs is either : - `str` that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system. - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the same way. - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`) Raw audio at the correct sampling rate (no further check will be done) - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "raw": np.array}` with optionally a `"stride": (left: int, right: int)` than can ask the pipeline to treat the first `left` samples and last `right` samples to be ignored in decoding (but used at inference to provide more context to the model). Only use `stride` with CTC models. return_timestamps (*optional*, `str`): Only available for pure CTC models. If set to `"char"`, the pipeline will return timestamps along the text for every character in the text. For instance if you get `[{"text": "h", "timestamp": (0.5, 0.6)}, {"text": "i", "timestamp": (0.7, 0.9)}]`, then it means the model predicts that the letter "h" was pronounced after `0.5` and before `0.6` seconds. If set to `"word"`, the pipeline will return timestamps along the text for every word in the text. For instance if you get `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp": (1.0, 1.5)}]`, then it means the model predicts that the word "hi" was pronounced after `0.5` and before `0.9` seconds. generate_kwargs (`dict`, *optional*): The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. 
For a complete overview of generate, check the [following guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). max_new_tokens (`int`, *optional*): The maximum number of tokens to generate, ignoring the number of tokens in the prompt. Return: `Dict`: A dictionary with the following keys: - **text** (`str`) -- The recognized text. - **chunks** (*optional*, `List[Dict]`) -- When using `return_timestamps`, the `chunks` will become a list containing all the various text chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing `"".join(chunk["text"] for chunk in output["chunks"])`. """ return super().__call__(inputs, **kwargs) def _sanitize_parameters( self, chunk_length_s=None, stride_length_s=None, ignore_warning=None, decoder_kwargs=None, return_timestamps=None, return_language=None, generate_kwargs=None, max_new_tokens=None, ): # No parameters on this pipeline right now preprocess_params = {} if chunk_length_s is not None: preprocess_params["chunk_length_s"] = chunk_length_s if stride_length_s is not None: preprocess_params["stride_length_s"] = stride_length_s if ignore_warning is not None: preprocess_params["ignore_warning"] = ignore_warning forward_params = defaultdict(dict) if max_new_tokens is not None: forward_params["generate_kwargs"]["max_new_tokens"] = max_new_tokens if generate_kwargs is not None: if max_new_tokens is not None and "max_new_tokens" in generate_kwargs: raise ValueError( "`max_new_tokens` is defined both as an argument and inside `generate_kwargs` argument, please use" " only 1 version" ) forward_params["generate_kwargs"].update(generate_kwargs) postprocess_params = {} if decoder_kwargs is not None: postprocess_params["decoder_kwargs"] = decoder_kwargs if return_timestamps is not None: forward_params["return_timestamps"] = return_timestamps postprocess_params["return_timestamps"] = return_timestamps if return_language is not None: postprocess_params["return_language"] = return_language return preprocess_params, forward_params, postprocess_params def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None, ignore_warning=False): if isinstance(inputs, str): if inputs.startswith("http://") or inputs.startswith("https://"): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png inputs = requests.get(inputs).content else: with open(inputs, "rb") as f: inputs = f.read() if isinstance(inputs, bytes): inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate) stride = None extra = {} if isinstance(inputs, dict): stride = inputs.pop("stride", None) # Accepting `"array"` which is the key defined in `datasets` for # better integration if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)): raise ValueError( "When passing a dictionary to AutomaticSpeechRecognitionPipeline, the dict needs to contain a " '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, ' "containing the sampling_rate associated with that array" ) _inputs = inputs.pop("raw", None) if _inputs is None: # Remove path which will not be used from `datasets`.
inputs.pop("path", None) _inputs = inputs.pop("array", None) in_sampling_rate = inputs.pop("sampling_rate") extra = inputs inputs = _inputs if in_sampling_rate != self.feature_extractor.sampling_rate: import torch if is_torchaudio_available(): from torchaudio import functional as F else: raise ImportError( "torchaudio is required to resample audio samples in AutomaticSpeechRecognitionPipeline. " "The torchaudio package can be installed through: `pip install torchaudio`." ) inputs = F.resample( torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate ).numpy() ratio = self.feature_extractor.sampling_rate / in_sampling_rate else: ratio = 1 if stride is not None: if stride[0] + stride[1] > inputs.shape[0]: raise ValueError("Stride is too large for input") # Stride needs to get the chunk length here, it's going to get # swallowed by the `feature_extractor` later, and then batching # can add extra data in the inputs, so we need to keep track # of the original length in the stride so we can cut properly. stride = (inputs.shape[0], int(round(stride[0] * ratio)), int(round(stride[1] * ratio))) if not isinstance(inputs, np.ndarray): raise ValueError(f"We expect a numpy ndarray as input, got `{type(inputs)}`") if len(inputs.shape) != 1: raise ValueError("We expect a single channel audio input for AutomaticSpeechRecognitionPipeline") if chunk_length_s: if self.type == "seq2seq" and not ignore_warning: logger.warning( "Using `chunk_length_s` is very experimental with seq2seq models. The results will not necessarily" " be entirely accurate and will have caveats. More information:" " https://github.com/huggingface/transformers/pull/20104. Ignore this warning with pipeline(...," " ignore_warning=True)" ) self._preprocess_params["ignore_warning"] = True if stride_length_s is None: stride_length_s = chunk_length_s / 6 if isinstance(stride_length_s, (int, float)): stride_length_s = [stride_length_s, stride_length_s] # XXX: Carefuly, this variable will not exist in `seq2seq` setting. # Currently chunking is not possible at this level for `seq2seq` so # it's ok. 
align_to = getattr(self.model.config, "inputs_to_logits_ratio", 1) chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate / align_to) * align_to) stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate / align_to) * align_to) stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate / align_to) * align_to) if chunk_len < stride_left + stride_right: raise ValueError("Chunk length must be superior to stride length") rescale = self.type != "seq2seq_whisper" # make sure that for item in chunk_iter( inputs, self.feature_extractor, chunk_len, stride_left, stride_right, rescale, self.torch_dtype ): yield item else: processed = self.feature_extractor( inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" ) if self.torch_dtype is not None: processed = processed.to(dtype=self.torch_dtype) if stride is not None: if self.type == "seq2seq": raise ValueError("Stride is only usable with CTC models, try removing it !") processed["stride"] = stride yield {"is_last": True, **processed, **extra} def _forward(self, model_inputs, return_timestamps=False, generate_kwargs=None): if generate_kwargs is None: generate_kwargs = {} if return_timestamps and self.type == "seq2seq_whisper": generate_kwargs["return_timestamps"] = return_timestamps if return_timestamps == "word": generate_kwargs["return_token_timestamps"] = True is_last = model_inputs.pop("is_last") if self.type in {"seq2seq", "seq2seq_whisper"}: encoder = self.model.get_encoder() # Consume values so we can let extra information flow freely through # the pipeline (important for `partial` in microphone) if "input_features" in model_inputs: inputs = model_inputs.pop("input_features") elif "input_values" in model_inputs: inputs = model_inputs.pop("input_values") else: raise ValueError( "Seq2Seq speech recognition model requires either a " f"`input_features` or `input_values` key, but only has {model_inputs.keys()}" ) # we need to pass `processed.get("attention_mask")` here since audio encoder # attention mask length is different from expected text decoder `encoder_attention_mask` length # `generate` magic to create the mask automatically won't work, we basically need to help # it here. attention_mask = model_inputs.pop("attention_mask", None) tokens = self.model.generate( encoder_outputs=encoder(inputs, attention_mask=attention_mask), attention_mask=attention_mask, **generate_kwargs, ) if return_timestamps == "word" and self.type == "seq2seq_whisper": out = {"tokens": tokens["sequences"], "token_timestamps": tokens["token_timestamps"]} else: out = {"tokens": tokens} if self.type == "seq2seq_whisper": stride = model_inputs.pop("stride", None) if stride is not None: out["stride"] = stride else: stride = model_inputs.pop("stride", None) input_values = model_inputs.pop("input_values") attention_mask = model_inputs.pop("attention_mask", None) outputs = self.model(input_values=input_values, attention_mask=attention_mask) logits = outputs.logits if self.type == "ctc_with_lm": out = {"logits": logits} else: out = {"tokens": logits.argmax(dim=-1)} if stride is not None: # Send stride to `postprocess`. # it needs to be handled there where # the pieces are to be concatenated. 
ratio = 1 / self.model.config.inputs_to_logits_ratio if isinstance(stride, tuple): out["stride"] = rescale_stride([stride], ratio)[0] else: out["stride"] = rescale_stride(stride, ratio) # Leftover extra = model_inputs return {"is_last": is_last, **out, **extra} def postprocess( self, model_outputs, decoder_kwargs: Optional[Dict] = None, return_timestamps=None, return_language=None ): # Optional return types optional = {} if return_timestamps and self.type == "seq2seq": raise ValueError("We cannot return_timestamps yet on non-ctc models apart from Whisper !") if return_timestamps == "char" and self.type == "ctc_with_lm": raise ValueError("CTC with LM cannot return `char` timestamps, only `word`") if return_timestamps == "char" and self.type == "seq2seq_whisper": raise ValueError("Whisper cannot return `char` timestamps, use `True` or `word` instead.") if return_language is not None and self.type != "seq2seq_whisper": raise ValueError("Only whisper can return language for now.") final_items = [] key = "logits" if self.type == "ctc_with_lm" else "tokens" stride = None for outputs in model_outputs: items = outputs[key].numpy() stride = outputs.get("stride", None) if stride is not None and self.type in {"ctc", "ctc_with_lm"}: total_n, left, right = stride # Total_n might be < logits.shape[1] # because of padding, that's why # we need to reconstruct this information # This won't work with left padding (which doesn't exist right now) right_n = total_n - right items = items[:, left:right_n] final_items.append(items) if stride and self.type == "seq2seq": items = _find_longest_common_sequence(final_items, self.tokenizer) elif self.type == "seq2seq_whisper": time_precision = self.feature_extractor.chunk_length / self.model.config.max_source_positions # Send the chunking back to seconds, it's easier to handle in whisper sampling_rate = self.feature_extractor.sampling_rate for output in model_outputs: if "stride" in output: chunk_len, stride_left, stride_right = output["stride"] # Go back in seconds chunk_len /= sampling_rate stride_left /= sampling_rate stride_right /= sampling_rate output["stride"] = chunk_len, stride_left, stride_right text, optional = self.tokenizer._decode_asr( model_outputs, return_timestamps=return_timestamps, return_language=return_language, time_precision=time_precision, ) else: items = np.concatenate(final_items, axis=1) items = items.squeeze(0) if self.type == "ctc_with_lm": if decoder_kwargs is None: decoder_kwargs = {} beams = self.decoder.decode_beams(items, **decoder_kwargs) text = beams[0][0] if return_timestamps: # Simply cast from pyctcdecode format to wav2vec2 format to leverage # pre-existing code later chunk_offset = beams[0][2] offsets = [] for word, (start_offset, end_offset) in chunk_offset: offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset}) elif self.type != "seq2seq_whisper": skip_special_tokens = self.type != "ctc" text = self.tokenizer.decode(items, skip_special_tokens=skip_special_tokens) if return_timestamps: offsets = self.tokenizer.decode( items, skip_special_tokens=skip_special_tokens, output_char_offsets=True )["char_offsets"] if return_timestamps == "word": offsets = self.tokenizer._get_word_offsets(offsets, self.tokenizer.replace_word_delimiter_char) if return_timestamps and self.type not in {"seq2seq", "seq2seq_whisper"}: chunks = [] for item in offsets: start = item["start_offset"] * self.model.config.inputs_to_logits_ratio start /= self.feature_extractor.sampling_rate stop = item["end_offset"] * 
self.model.config.inputs_to_logits_ratio stop /= self.feature_extractor.sampling_rate chunks.append({"text": item[return_timestamps], "timestamp": (start, stop)}) optional["chunks"] = chunks extra = defaultdict(list) for output in model_outputs: output.pop("tokens", None) output.pop("logits", None) output.pop("is_last", None) output.pop("stride", None) output.pop("token_timestamps", None) for k, v in output.items(): extra[k].append(v) return {"text": text, **optional, **extra} def _find_timestamp_sequence(sequences, tokenizer, feature_extractor, max_source_positions): """ Computes the final sequences by merging the end of the nth sequence with the beginning of the n+1th sequence. Since `WhisperForConditionalGeneration` produces the timestamps pairwise, we filter the consecutive timestamps and only iterate over them. We keep track of the `time` which indicates the actual starting time of the chunk that is processed. We need to make sure to offset the timestamps tokens by the `time` in order for the tokenizer to properly compute the final `offset`. """ # index of the first timestamp token timestamp_begin = tokenizer.convert_tokens_to_ids("<|notimestamps|>") + 1 items = [] # approximation of the token to time ratio : ~0.2seconds time_precision = feature_extractor.chunk_length / max_source_positions time = 0 for seq_idx, item in enumerate(sequences): sequence, stride = item if isinstance(sequence, list): sequence = np.array(sequence) chunk_len, stride_left, stride_right = stride sequence = sequence.squeeze(0) # get rid of the `forced_decoder_idx` that are use to parametrize the generation begin_idx = np.where(sequence == timestamp_begin)[0][0] if timestamp_begin in sequence else 0 sequence = sequence[begin_idx:] timestamp_tokens = sequence >= timestamp_begin if seq_idx != 0 and sum(timestamp_tokens) > 0: consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1 last_timestamp = np.where(timestamp_tokens)[0][-1] consecutive = np.append(consecutive, last_timestamp) if last_timestamp not in consecutive else consecutive time -= stride_left + stride_right offset = int((time / feature_extractor.sampling_rate) / time_precision) overlap_time = int((stride_left / feature_extractor.sampling_rate) / time_precision) # relevant timestamps are in the overlapping part relevant_timestamp = np.where(sequence[consecutive] >= timestamp_begin + overlap_time)[0] if relevant_timestamp.shape[0] > 0: relevant_timestamp = ( consecutive[relevant_timestamp[0] - 1] if relevant_timestamp[0] > 0 else consecutive[0] ) # if a big stride is used, we need to check some of the previous items for the best overlap best_match = 0 sliced_sequence = [] for idx, previous_sequence in enumerate(reversed(items)): previous_tokens = previous_sequence[1:-1] if previous_sequence[0] < (timestamp_begin + offset - overlap_time) and idx != 0: break # the previous sequence is too far in the past if len(previous_tokens) > 0: # find the longest common sequence between the overlapping parts index_left, index_right, match_length = _fast_find_longest_common_sequence( sequence[1:relevant_timestamp], previous_tokens ) # don't do anything if only 1 token was matched if match_length > 1 and match_length > best_match: best_match = match_length best_idx = idx end_of_curr_sequence_idx = ( np.where(sequence[index_left + 1 :] >= timestamp_begin)[0][0] + 1 ) end_of_curr_sequence_idx = end_of_curr_sequence_idx + 1 + index_left # if all the tokens are matched, suffix if index_left == 0 and match_length == len(previous_tokens): sliced_sequence = 
np.insert( sequence[index_left + 1 : end_of_curr_sequence_idx], 0, previous_sequence[0] ) sliced_sequence[-1] = previous_sequence[-1] # if part of the previous sequence is not taken elif index_left >= 0: sliced_sequence = sequence[index_left + 1 : end_of_curr_sequence_idx] # let's insert the missing part of the previous sequence previous_slice = ( previous_sequence[: index_right + 1] if index_right > 0 else [previous_sequence[0]] ) sliced_sequence = np.insert(sliced_sequence, 0, previous_slice) sliced_sequence[-1] += offset if len(sliced_sequence) > 0: items[len(items) - best_idx - 1] = sliced_sequence items = items[: len(items) - best_idx] sequence = sequence[end_of_curr_sequence_idx:] # sequence might have changed timestamp_tokens = sequence >= timestamp_begin consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1 if sum(timestamp_tokens) > 0: last_timestamp = np.where(timestamp_tokens)[0][-1] consecutive = ( np.append(consecutive, last_timestamp + 1) if last_timestamp not in consecutive else consecutive ) if len(consecutive) > 0: last_slice = 0 for current_slice in consecutive: actual_offset = items[-1][-1] if seq_idx != 0 or last_slice != 0 else sequence[0] sliced_tokens = sequence[last_slice:current_slice] duration = sliced_tokens[-1] - sliced_tokens[0] sliced_tokens[0] = actual_offset sliced_tokens[-1] = actual_offset + duration items.append(sliced_tokens) last_slice = current_slice time += chunk_len result = [] for i in range(len(items)): result += items[i].tolist() return result
35,296
49.496423
176
py
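A short usage sketch of the automatic speech recognition pipeline above, reusing the Whisper checkpoint from its docstring example. `chunk_length_s` and `return_timestamps` are the options documented in `__call__` / `_sanitize_parameters`; the exact values are illustrative only.

```python
from transformers import pipeline

# Checkpoint taken from the class docstring example above.
transcriber = pipeline(task="automatic-speech-recognition", model="openai/whisper-base")

# Long audio is processed in overlapping windows; timestamps are returned per chunk.
output = transcriber(
    "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac",
    chunk_length_s=30,
    return_timestamps=True,
)
print(output["text"])
for chunk in output.get("chunks", []):
    print(chunk["timestamp"], chunk["text"])
```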
transformers
transformers-main/src/transformers/pipelines/token_classification.py
import types import warnings from typing import List, Optional, Tuple, Union import numpy as np from ..models.bert.tokenization_bert import BasicTokenizer from ..utils import ( ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available, ) from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline, Dataset if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING class TokenClassificationArgumentHandler(ArgumentHandler): """ Handles arguments for token classification. """ def __call__(self, inputs: Union[str, List[str]], **kwargs): if inputs is not None and isinstance(inputs, (list, tuple)) and len(inputs) > 0: inputs = list(inputs) batch_size = len(inputs) elif isinstance(inputs, str): inputs = [inputs] batch_size = 1 elif Dataset is not None and isinstance(inputs, Dataset) or isinstance(inputs, types.GeneratorType): return inputs, None else: raise ValueError("At least one input is required.") offset_mapping = kwargs.get("offset_mapping") if offset_mapping: if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple): offset_mapping = [offset_mapping] if len(offset_mapping) != batch_size: raise ValueError("offset_mapping should have the same batch size as the input") return inputs, offset_mapping class AggregationStrategy(ExplicitEnum): """All the valid aggregation strategies for TokenClassificationPipeline""" NONE = "none" SIMPLE = "simple" FIRST = "first" AVERAGE = "average" MAX = "max" @add_end_docstrings( PIPELINE_INIT_ARGS, r""" ignore_labels (`List[str]`, defaults to `["O"]`): A list of labels to ignore. grouped_entities (`bool`, *optional*, defaults to `False`): DEPRECATED, use `aggregation_strategy` instead. Whether or not to group the tokens corresponding to the same entity together in the predictions or not. stride (`int`, *optional*): If stride is provided, the pipeline is applied on all the text. The text is split into chunks of size model_max_length. Works only with fast tokenizers and `aggregation_strategy` different from `NONE`. The value of this argument defines the number of overlapping tokens between chunks. In other words, the model will shift forward by `tokenizer.model_max_length - stride` tokens each step. aggregation_strategy (`str`, *optional*, defaults to `"none"`): The strategy to fuse (or not) tokens based on the model prediction. - "none" : Will simply not do any aggregation and simply return raw results from the model - "simple" : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C, I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{"word": ABC, "entity": "TAG"}, {"word": "D", "entity": "TAG2"}, {"word": "E", "entity": "TAG2"}] Notice that two consecutive B tags will end up as different entities. On word based languages, we might end up splitting words undesirably : Imagine Microsoft being tagged as [{"word": "Micro", "entity": "ENTERPRISE"}, {"word": "soft", "entity": "NAME"}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages that support that meaning, which is basically tokens separated by a space). These mitigations will only work on real words, "New york" might still be tagged with two different entities. - "first" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot end up with different tags. 
Words will simply use the tag of the first token of the word when there is ambiguity. - "average" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot end up with different tags. scores will be averaged first across tokens, and then the maximum label is applied. - "max" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot end up with different tags. Word entity will simply be the token with the maximum score. """, ) class TokenClassificationPipeline(ChunkPipeline): """ Named Entity Recognition pipeline using any `ModelForTokenClassification`. See the [named entity recognition examples](../task_summary#named-entity-recognition) for more information. Example: ```python >>> from transformers import pipeline >>> token_classifier = pipeline(model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple") >>> sentence = "Je m'appelle jean-baptiste et je vis à montréal" >>> tokens = token_classifier(sentence) >>> tokens [{'entity_group': 'PER', 'score': 0.9931, 'word': 'jean-baptiste', 'start': 12, 'end': 26}, {'entity_group': 'LOC', 'score': 0.998, 'word': 'montréal', 'start': 38, 'end': 47}] >>> token = tokens[0] >>> # Start and end provide an easy way to highlight words in the original text. >>> sentence[token["start"] : token["end"]] ' jean-baptiste' >>> # Some models use the same idea to do part of speech. >>> syntaxer = pipeline(model="vblagoje/bert-english-uncased-finetuned-pos", aggregation_strategy="simple") >>> syntaxer("My name is Sarah and I live in London") [{'entity_group': 'PRON', 'score': 0.999, 'word': 'my', 'start': 0, 'end': 2}, {'entity_group': 'NOUN', 'score': 0.997, 'word': 'name', 'start': 3, 'end': 7}, {'entity_group': 'AUX', 'score': 0.994, 'word': 'is', 'start': 8, 'end': 10}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'sarah', 'start': 11, 'end': 16}, {'entity_group': 'CCONJ', 'score': 0.999, 'word': 'and', 'start': 17, 'end': 20}, {'entity_group': 'PRON', 'score': 0.999, 'word': 'i', 'start': 21, 'end': 22}, {'entity_group': 'VERB', 'score': 0.998, 'word': 'live', 'start': 23, 'end': 27}, {'entity_group': 'ADP', 'score': 0.999, 'word': 'in', 'start': 28, 'end': 30}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'london', 'start': 31, 'end': 37}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This token recognition pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous). The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=token-classification). 
""" default_input_names = "sequences" def __init__(self, args_parser=TokenClassificationArgumentHandler(), *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type( TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ) self._basic_tokenizer = BasicTokenizer(do_lower_case=False) self._args_parser = args_parser def _sanitize_parameters( self, ignore_labels=None, grouped_entities: Optional[bool] = None, ignore_subwords: Optional[bool] = None, aggregation_strategy: Optional[AggregationStrategy] = None, offset_mapping: Optional[List[Tuple[int, int]]] = None, stride: Optional[int] = None, ): preprocess_params = {} if offset_mapping is not None: preprocess_params["offset_mapping"] = offset_mapping postprocess_params = {} if grouped_entities is not None or ignore_subwords is not None: if grouped_entities and ignore_subwords: aggregation_strategy = AggregationStrategy.FIRST elif grouped_entities and not ignore_subwords: aggregation_strategy = AggregationStrategy.SIMPLE else: aggregation_strategy = AggregationStrategy.NONE if grouped_entities is not None: warnings.warn( "`grouped_entities` is deprecated and will be removed in version v5.0.0, defaulted to" f' `aggregation_strategy="{aggregation_strategy}"` instead.' ) if ignore_subwords is not None: warnings.warn( "`ignore_subwords` is deprecated and will be removed in version v5.0.0, defaulted to" f' `aggregation_strategy="{aggregation_strategy}"` instead.' ) if aggregation_strategy is not None: if isinstance(aggregation_strategy, str): aggregation_strategy = AggregationStrategy[aggregation_strategy.upper()] if ( aggregation_strategy in {AggregationStrategy.FIRST, AggregationStrategy.MAX, AggregationStrategy.AVERAGE} and not self.tokenizer.is_fast ): raise ValueError( "Slow tokenizers cannot handle subwords. Please set the `aggregation_strategy` option" ' to `"simple"` or use a fast tokenizer.' ) postprocess_params["aggregation_strategy"] = aggregation_strategy if ignore_labels is not None: postprocess_params["ignore_labels"] = ignore_labels if stride is not None: if stride >= self.tokenizer.model_max_length: raise ValueError( "`stride` must be less than `tokenizer.model_max_length` (or even lower if the tokenizer adds special tokens)" ) if aggregation_strategy == AggregationStrategy.NONE: raise ValueError( "`stride` was provided to process all the text but `aggregation_strategy=" f'"{aggregation_strategy}"`, please select another one instead.' ) else: if self.tokenizer.is_fast: tokenizer_params = { "return_overflowing_tokens": True, "padding": True, "stride": stride, } preprocess_params["tokenizer_params"] = tokenizer_params else: raise ValueError( "`stride` was provided to process all the text but you're using a slow tokenizer." " Please use a fast tokenizer." ) return preprocess_params, {}, postprocess_params def __call__(self, inputs: Union[str, List[str]], **kwargs): """ Classify each token of the text(s) given as inputs. Args: inputs (`str` or `List[str]`): One or several texts (or one list of texts) for token classification. Return: A list or a list of list of `dict`: Each result comes as a list of dictionaries (one for each token in the corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with the following keys: - **word** (`str`) -- The token/word classified. This is obtained by decoding the selected tokens. If you want to have the exact string in the original sentence, use `start` and `end`. 
- **score** (`float`) -- The corresponding probability for `entity`. - **entity** (`str`) -- The entity predicted for that token/word (it is named *entity_group* when *aggregation_strategy* is not `"none"`. - **index** (`int`, only present when `aggregation_strategy="none"`) -- The index of the corresponding token in the sentence. - **start** (`int`, *optional*) -- The index of the start of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer - **end** (`int`, *optional*) -- The index of the end of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer """ _inputs, offset_mapping = self._args_parser(inputs, **kwargs) if offset_mapping: kwargs["offset_mapping"] = offset_mapping return super().__call__(inputs, **kwargs) def preprocess(self, sentence, offset_mapping=None, **preprocess_params): tokenizer_params = preprocess_params.pop("tokenizer_params", {}) truncation = True if self.tokenizer.model_max_length and self.tokenizer.model_max_length > 0 else False inputs = self.tokenizer( sentence, return_tensors=self.framework, truncation=truncation, return_special_tokens_mask=True, return_offsets_mapping=self.tokenizer.is_fast, **tokenizer_params, ) inputs.pop("overflow_to_sample_mapping", None) num_chunks = len(inputs["input_ids"]) for i in range(num_chunks): if self.framework == "tf": model_inputs = {k: tf.expand_dims(v[i], 0) for k, v in inputs.items()} else: model_inputs = {k: v[i].unsqueeze(0) for k, v in inputs.items()} if offset_mapping is not None: model_inputs["offset_mapping"] = offset_mapping model_inputs["sentence"] = sentence if i == 0 else None model_inputs["is_last"] = i == num_chunks - 1 yield model_inputs def _forward(self, model_inputs): # Forward special_tokens_mask = model_inputs.pop("special_tokens_mask") offset_mapping = model_inputs.pop("offset_mapping", None) sentence = model_inputs.pop("sentence") is_last = model_inputs.pop("is_last") if self.framework == "tf": logits = self.model(**model_inputs)[0] else: output = self.model(**model_inputs) logits = output["logits"] if isinstance(output, dict) else output[0] return { "logits": logits, "special_tokens_mask": special_tokens_mask, "offset_mapping": offset_mapping, "sentence": sentence, "is_last": is_last, **model_inputs, } def postprocess(self, all_outputs, aggregation_strategy=AggregationStrategy.NONE, ignore_labels=None): if ignore_labels is None: ignore_labels = ["O"] all_entities = [] for model_outputs in all_outputs: logits = model_outputs["logits"][0].numpy() sentence = all_outputs[0]["sentence"] input_ids = model_outputs["input_ids"][0] offset_mapping = ( model_outputs["offset_mapping"][0] if model_outputs["offset_mapping"] is not None else None ) special_tokens_mask = model_outputs["special_tokens_mask"][0].numpy() maxes = np.max(logits, axis=-1, keepdims=True) shifted_exp = np.exp(logits - maxes) scores = shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) if self.framework == "tf": input_ids = input_ids.numpy() offset_mapping = offset_mapping.numpy() if offset_mapping is not None else None pre_entities = self.gather_pre_entities( sentence, input_ids, scores, offset_mapping, special_tokens_mask, aggregation_strategy ) grouped_entities = self.aggregate(pre_entities, aggregation_strategy) # Filter anything that is in self.ignore_labels entities = [ entity for entity in grouped_entities if entity.get("entity", None) not in ignore_labels and entity.get("entity_group", None) not in ignore_labels ] 
all_entities.extend(entities) num_chunks = len(all_outputs) if num_chunks > 1: all_entities = self.aggregate_overlapping_entities(all_entities) return all_entities def aggregate_overlapping_entities(self, entities): if len(entities) == 0: return entities entities = sorted(entities, key=lambda x: x["start"]) aggregated_entities = [] previous_entity = entities[0] for entity in entities: if previous_entity["start"] <= entity["start"] < previous_entity["end"]: current_length = entity["end"] - entity["start"] previous_length = previous_entity["end"] - previous_entity["start"] if current_length > previous_length: previous_entity = entity elif current_length == previous_length and entity["score"] > previous_entity["score"]: previous_entity = entity else: aggregated_entities.append(previous_entity) previous_entity = entity aggregated_entities.append(previous_entity) return aggregated_entities def gather_pre_entities( self, sentence: str, input_ids: np.ndarray, scores: np.ndarray, offset_mapping: Optional[List[Tuple[int, int]]], special_tokens_mask: np.ndarray, aggregation_strategy: AggregationStrategy, ) -> List[dict]: """Fuse various numpy arrays into dicts with all the information needed for aggregation""" pre_entities = [] for idx, token_scores in enumerate(scores): # Filter special_tokens if special_tokens_mask[idx]: continue word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx])) if offset_mapping is not None: start_ind, end_ind = offset_mapping[idx] if not isinstance(start_ind, int): if self.framework == "pt": start_ind = start_ind.item() end_ind = end_ind.item() word_ref = sentence[start_ind:end_ind] if getattr(self.tokenizer, "_tokenizer", None) and getattr( self.tokenizer._tokenizer.model, "continuing_subword_prefix", None ): # This is a BPE, word aware tokenizer, there is a correct way # to fuse tokens is_subword = len(word) != len(word_ref) else: # This is a fallback heuristic. This will fail most likely on any kind of text + punctuation mixtures that will be considered "words". Non word aware models cannot do better than this unfortunately. 
if aggregation_strategy in { AggregationStrategy.FIRST, AggregationStrategy.AVERAGE, AggregationStrategy.MAX, }: warnings.warn( "Tokenizer does not support real words, using fallback heuristic", UserWarning, ) is_subword = start_ind > 0 and " " not in sentence[start_ind - 1 : start_ind + 1] if int(input_ids[idx]) == self.tokenizer.unk_token_id: word = word_ref is_subword = False else: start_ind = None end_ind = None is_subword = False pre_entity = { "word": word, "scores": token_scores, "start": start_ind, "end": end_ind, "index": idx, "is_subword": is_subword, } pre_entities.append(pre_entity) return pre_entities def aggregate(self, pre_entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]: if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}: entities = [] for pre_entity in pre_entities: entity_idx = pre_entity["scores"].argmax() score = pre_entity["scores"][entity_idx] entity = { "entity": self.model.config.id2label[entity_idx], "score": score, "index": pre_entity["index"], "word": pre_entity["word"], "start": pre_entity["start"], "end": pre_entity["end"], } entities.append(entity) else: entities = self.aggregate_words(pre_entities, aggregation_strategy) if aggregation_strategy == AggregationStrategy.NONE: return entities return self.group_entities(entities) def aggregate_word(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> dict: word = self.tokenizer.convert_tokens_to_string([entity["word"] for entity in entities]) if aggregation_strategy == AggregationStrategy.FIRST: scores = entities[0]["scores"] idx = scores.argmax() score = scores[idx] entity = self.model.config.id2label[idx] elif aggregation_strategy == AggregationStrategy.MAX: max_entity = max(entities, key=lambda entity: entity["scores"].max()) scores = max_entity["scores"] idx = scores.argmax() score = scores[idx] entity = self.model.config.id2label[idx] elif aggregation_strategy == AggregationStrategy.AVERAGE: scores = np.stack([entity["scores"] for entity in entities]) average_scores = np.nanmean(scores, axis=0) entity_idx = average_scores.argmax() entity = self.model.config.id2label[entity_idx] score = average_scores[entity_idx] else: raise ValueError("Invalid aggregation_strategy") new_entity = { "entity": entity, "score": score, "word": word, "start": entities[0]["start"], "end": entities[-1]["end"], } return new_entity def aggregate_words(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]: """ Override tokens from a given word that disagree to force agreement on word boundaries. Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT """ if aggregation_strategy in { AggregationStrategy.NONE, AggregationStrategy.SIMPLE, }: raise ValueError("NONE and SIMPLE strategies are invalid for word aggregation") word_entities = [] word_group = None for entity in entities: if word_group is None: word_group = [entity] elif entity["is_subword"]: word_group.append(entity) else: word_entities.append(self.aggregate_word(word_group, aggregation_strategy)) word_group = [entity] # Last item if word_group is not None: word_entities.append(self.aggregate_word(word_group, aggregation_strategy)) return word_entities def group_sub_entities(self, entities: List[dict]) -> dict: """ Group together the adjacent tokens with the same entity predicted. Args: entities (`dict`): The entities predicted by the pipeline. 
""" # Get the first entity in the entity group entity = entities[0]["entity"].split("-")[-1] scores = np.nanmean([entity["score"] for entity in entities]) tokens = [entity["word"] for entity in entities] entity_group = { "entity_group": entity, "score": np.mean(scores), "word": self.tokenizer.convert_tokens_to_string(tokens), "start": entities[0]["start"], "end": entities[-1]["end"], } return entity_group def get_tag(self, entity_name: str) -> Tuple[str, str]: if entity_name.startswith("B-"): bi = "B" tag = entity_name[2:] elif entity_name.startswith("I-"): bi = "I" tag = entity_name[2:] else: # It's not in B-, I- format # Default to I- for continuation. bi = "I" tag = entity_name return bi, tag def group_entities(self, entities: List[dict]) -> List[dict]: """ Find and group together the adjacent tokens with the same entity predicted. Args: entities (`dict`): The entities predicted by the pipeline. """ entity_groups = [] entity_group_disagg = [] for entity in entities: if not entity_group_disagg: entity_group_disagg.append(entity) continue # If the current entity is similar and adjacent to the previous entity, # append it to the disaggregated entity group # The split is meant to account for the "B" and "I" prefixes # Shouldn't merge if both entities are B-type bi, tag = self.get_tag(entity["entity"]) last_bi, last_tag = self.get_tag(entity_group_disagg[-1]["entity"]) if tag == last_tag and bi != "B": # Modify subword type to be previous_type entity_group_disagg.append(entity) else: # If the current entity is different from the previous entity # aggregate the disaggregated entity group entity_groups.append(self.group_sub_entities(entity_group_disagg)) entity_group_disagg = [entity] if entity_group_disagg: # it's the last entity, add it to the entity groups entity_groups.append(self.group_sub_entities(entity_group_disagg)) return entity_groups NerPipeline = TokenClassificationPipeline
26,656
45.603147
731
py
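A minimal usage sketch of the token classification pipeline above, reusing the NER checkpoint from its docstring example; `aggregation_strategy="simple"` groups sub-tokens into entities as described in the class documentation.

```python
from transformers import pipeline

# Checkpoint taken from the class docstring example above.
ner = pipeline(task="ner", model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple")

sentence = "Je m'appelle jean-baptiste et je vis à montréal"
for entity in ner(sentence):
    # start/end index back into the original sentence
    print(entity["entity_group"], repr(sentence[entity["start"] : entity["end"]]), entity["score"])
```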
transformers
transformers-main/src/transformers/pipelines/video_classification.py
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING logger = logging.get_logger(__name__) @add_end_docstrings(PIPELINE_INIT_ARGS) class VideoClassificationPipeline(Pipeline): """ Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a video. This video classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"video-classification"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=video-classification). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "decord") self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING) def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None): preprocess_params = {} if frame_sampling_rate is not None: preprocess_params["frame_sampling_rate"] = frame_sampling_rate if num_frames is not None: preprocess_params["num_frames"] = num_frames postprocess_params = {} if top_k is not None: postprocess_params["top_k"] = top_k return preprocess_params, {}, postprocess_params def __call__(self, videos: Union[str, List[str]], **kwargs): """ Assign labels to the video(s) passed as inputs. Args: videos (`str`, `List[str]`): The pipeline handles three types of videos: - A string containing a http link pointing to a video - A string containing a local path to a video The pipeline accepts either a single video or a batch of videos, which must then be passed as a string. Videos in a batch must all be in the same format: all as http links or all as local paths. top_k (`int`, *optional*, defaults to 5): The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels. num_frames (`int`, *optional*, defaults to `self.model.config.num_frames`): The number of frames sampled from the video to run the classification on. If not provided, will default to the number of frames specified in the model configuration. frame_sampling_rate (`int`, *optional*, defaults to 1): The sampling rate used to select frames from the video. If not provided, will default to 1, i.e. every frame will be used. Return: A dictionary or a list of dictionaries containing result. If the input is a single video, will return a dictionary, if the input is a list of several videos, will return a list of dictionaries corresponding to the videos. The dictionaries contain the following keys: - **label** (`str`) -- The label identified by the model. - **score** (`int`) -- The score attributed by the model for that label. 
""" return super().__call__(videos, **kwargs) def preprocess(self, video, num_frames=None, frame_sampling_rate=1): if num_frames is None: num_frames = self.model.config.num_frames if video.startswith("http://") or video.startswith("https://"): video = BytesIO(requests.get(video).content) videoreader = VideoReader(video) videoreader.seek(0) start_idx = 0 end_idx = num_frames * frame_sampling_rate - 1 indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64) video = videoreader.get_batch(indices).asnumpy() video = list(video) model_inputs = self.image_processor(video, return_tensors=self.framework) return model_inputs def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) return model_outputs def postprocess(self, model_outputs, top_k=5): if top_k > self.model.config.num_labels: top_k = self.model.config.num_labels if self.framework == "pt": probs = model_outputs.logits.softmax(-1)[0] scores, ids = probs.topk(top_k) else: raise ValueError(f"Unsupported framework: {self.framework}") scores = scores.tolist() ids = ids.tolist() return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
5,008
39.723577
119
py
transformers
transformers-main/src/transformers/pipelines/object_detection.py
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING logger = logging.get_logger(__name__) Prediction = Dict[str, Any] Predictions = List[Prediction] @add_end_docstrings(PIPELINE_INIT_ARGS) class ObjectDetectionPipeline(Pipeline): """ Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of objects and their classes. Example: ```python >>> from transformers import pipeline >>> detector = pipeline(model="facebook/detr-resnet-50") >>> detector("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") [{'score': 0.997, 'label': 'bird', 'box': {'xmin': 69, 'ymin': 171, 'xmax': 396, 'ymax': 507}}, {'score': 0.999, 'label': 'bird', 'box': {'xmin': 398, 'ymin': 105, 'xmax': 767, 'ymax': 507}}] >>> # x, y are expressed relative to the top left hand corner. ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"object-detection"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=object-detection). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch.") requires_backends(self, "vision") self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()) ) def _sanitize_parameters(self, **kwargs): postprocess_kwargs = {} if "threshold" in kwargs: postprocess_kwargs["threshold"] = kwargs["threshold"] return {}, {}, postprocess_kwargs def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]: """ Detect objects (bounding boxes & classes) in the image(s) passed as inputs. Args: images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing an HTTP(S) link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images. threshold (`float`, *optional*, defaults to 0.9): The probability necessary to make a prediction. Return: A list of dictionaries or a list of list of dictionaries containing the result. If the input is a single image, will return a list of dictionaries, if the input is a list of several images, will return a list of list of dictionaries corresponding to each image. The dictionaries contain the following keys: - **label** (`str`) -- The class label identified by the model. - **score** (`float`) -- The score attributed by the model for that label. - **box** (`List[Dict[str, int]]`) -- The bounding box of detected object in image's original size. 
""" return super().__call__(*args, **kwargs) def preprocess(self, image): image = load_image(image) target_size = torch.IntTensor([[image.height, image.width]]) inputs = self.image_processor(images=[image], return_tensors="pt") if self.tokenizer is not None: inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt") inputs["target_size"] = target_size return inputs def _forward(self, model_inputs): target_size = model_inputs.pop("target_size") outputs = self.model(**model_inputs) model_outputs = outputs.__class__({"target_size": target_size, **outputs}) if self.tokenizer is not None: model_outputs["bbox"] = model_inputs["bbox"] return model_outputs def postprocess(self, model_outputs, threshold=0.9): target_size = model_outputs["target_size"] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. height, width = target_size[0].tolist() def unnormalize(bbox): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1) labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()] boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)] keys = ["score", "label", "box"] annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size) raw_annotation = raw_annotations[0] scores = raw_annotation["scores"] labels = raw_annotation["labels"] boxes = raw_annotation["boxes"] raw_annotation["scores"] = scores.tolist() raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels] raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] keys = ["score", "label", "box"] annotation = [ dict(zip(keys, vals)) for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"]) ] return annotation def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]: """ Turns list [xmin, xmax, ymin, ymax] into dict { "xmin": xmin, ... } Args: box (`torch.Tensor`): Tensor containing the coordinates in corners format. Returns: bbox (`Dict[str, int]`): Dict containing the coordinates in corners format. """ if self.framework != "pt": raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.") xmin, ymin, xmax, ymax = box.int().tolist() bbox = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
7,441
40.575419
195
py
transformers
transformers-main/src/transformers/pipelines/zero_shot_image_classification.py
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax logger = logging.get_logger(__name__) @add_end_docstrings(PIPELINE_INIT_ARGS) class ZeroShotImageClassificationPipeline(Pipeline): """ Zero shot image classification pipeline using `CLIPModel`. This pipeline predicts the class of an image when you provide an image and a set of `candidate_labels`. Example: ```python >>> from transformers import pipeline >>> classifier = pipeline(model="openai/clip-vit-large-patch14") >>> classifier( ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", ... candidate_labels=["animals", "humans", "landscape"], ... ) [{'score': 0.965, 'label': 'animals'}, {'score': 0.03, 'label': 'humans'}, {'score': 0.005, 'label': 'landscape'}] >>> classifier( ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", ... candidate_labels=["black and white", "photorealist", "painting"], ... ) [{'score': 0.996, 'label': 'black and white'}, {'score': 0.003, 'label': 'photorealist'}, {'score': 0.0, 'label': 'painting'}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This image classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"zero-shot-image-classification"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-image-classification). """ def __init__(self, **kwargs): super().__init__(**kwargs) requires_backends(self, "vision") self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs): """ Assign labels to the image(s) passed as inputs. Args: images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly candidate_labels (`List[str]`): The candidate labels for this image hypothesis_template (`str`, *optional*, defaults to `"This is a photo of {}"`): The sentence used in cunjunction with *candidate_labels* to attempt the image classification by replacing the placeholder with the candidate_labels. Then likelihood is estimated by using logits_per_image Return: A list of dictionaries containing result, one dictionary per proposed label. The dictionaries contain the following keys: - **label** (`str`) -- The label identified by the model. It is one of the suggested `candidate_label`. - **score** (`float`) -- The score attributed by the model for that label (between 0 and 1). 
""" return super().__call__(images, **kwargs) def _sanitize_parameters(self, **kwargs): preprocess_params = {} if "candidate_labels" in kwargs: preprocess_params["candidate_labels"] = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"] return preprocess_params, {}, {} def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."): image = load_image(image) inputs = self.image_processor(images=[image], return_tensors=self.framework) inputs["candidate_labels"] = candidate_labels sequences = [hypothesis_template.format(x) for x in candidate_labels] text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True) inputs["text_inputs"] = [text_inputs] return inputs def _forward(self, model_inputs): candidate_labels = model_inputs.pop("candidate_labels") text_inputs = model_inputs.pop("text_inputs") if isinstance(text_inputs[0], UserDict): text_inputs = text_inputs[0] else: # Batching case. text_inputs = text_inputs[0][0] outputs = self.model(**text_inputs, **model_inputs) model_outputs = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def postprocess(self, model_outputs): candidate_labels = model_outputs.pop("candidate_labels") logits = model_outputs["logits"][0] if self.framework == "pt": probs = logits.softmax(dim=-1).squeeze(-1) scores = probs.tolist() if not isinstance(scores, list): scores = [scores] elif self.framework == "tf": probs = stable_softmax(logits, axis=-1) scores = probs.numpy().tolist() else: raise ValueError(f"Unsupported framework: {self.framework}") result = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0]) ] return result
6,075
37.700637
130
py
transformers
transformers-main/src/transformers/pipelines/mask_generation.py
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """
    Automatic mask generation for images using `SamForMaskGeneration`. This pipeline predicts binary masks for an
    image, given an image. It is a `ChunkPipeline` because you can separate the points into mini-batches in order to
    avoid OOM issues. Use the `points_per_batch` argument to control the number of points that will be processed at
    the same time. Default is `64`.

    The pipeline works in 3 steps:
        1. `preprocess`: A grid of 1024 points evenly separated is generated along with bounding boxes and point
           labels. For more details on how the points and bounding boxes are created, check the `_generate_crop_boxes`
           function. The image is also preprocessed using the `image_processor`. This function `yields` a minibatch of
           `points_per_batch`.

        2. `forward`: feeds the outputs of `preprocess` to the model. The image embedding is computed only once.
           Calls both `self.model.get_image_embeddings` and makes sure that the gradients are not computed, and the
           tensors and models are on the same device.

        3. `postprocess`: The most important part of the automatic mask generation happens here. Three steps are
           performed:
                - image_processor.postprocess_masks (run on each minibatch loop): takes in the raw output masks,
                  resizes them according to the image size, and transforms them into binary masks.
                - image_processor.filter_masks (on each minibatch loop): uses both `pred_iou_thresh` and
                  `stability_scores`. Also applies a variety of filters based on non-maximum suppression to remove
                  bad masks.
                - image_processor.postprocess_masks_for_amg applies NMS (non-maximum suppression) on the masks to
                  only keep relevant ones.

    Arguments:
        model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.
        tokenizer ([`PreTrainedTokenizer`]):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            [`PreTrainedTokenizer`].
        feature_extractor ([`SequenceFeatureExtractor`]):
            The feature extractor that will be used by the pipeline to encode the input.
        points_per_batch (`int`, *optional*, defaults to 64):
            Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU
            memory.
        output_bboxes_mask (`bool`, *optional*, defaults to `False`):
            Whether or not to output the bounding box predictions.
        output_rle_masks (`bool`, *optional*, defaults to `False`):
            Whether or not to output the masks in `RLE` format.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline(model="facebook/sam-vit-base", task="mask-generation")
    >>> outputs = generator(
    ...     "http://images.cocodataset.org/val2017/000000039769.jpg",
    ... )

    >>> outputs = generator(
    ...     "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", points_per_batch=128
    ...
) ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"mask-generation"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=mask-generation). """ def __init__(self, **kwargs): super().__init__(**kwargs) requires_backends(self, "vision") requires_backends(self, "torch") if self.framework != "pt": raise ValueError(f"The {self.__class__} is only available in PyTorch.") self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING) def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} postprocess_kwargs = {} forward_params = {} # preprocess args if "points_per_batch" in kwargs: preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"] if "points_per_crop" in kwargs: preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"] if "crops_n_layers" in kwargs: preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: forward_params["stability_score_offset"] = kwargs["stability_score_offset"] if "mask_threshold" in kwargs: forward_params["mask_threshold"] = kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs): """ Generates binary segmentation masks Args: inputs (`np.ndarray` or `bytes` or `str` or `dict`): Image or list of images. mask_threshold (`float`, *optional*, defaults to 0.0): Threshold to use when turning the predicted masks into binary values. pred_iou_thresh (`float`, *optional*, defaults to 0.88): A filtering threshold in `[0,1]` applied on the model's predicted mask quality. stability_score_thresh (`float`, *optional*, defaults to 0.95): A filtering threshold in `[0,1]`, using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (`int`, *optional*, defaults to 1): The amount to shift the cutoff when calculated the stability score. crops_nms_thresh (`float`, *optional*, defaults to 0.7): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crops_n_layers (`int`, *optional*, defaults to 0): If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_overlap_ratio (`float`, *optional*, defaults to `512 / 1500`): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. 
crop_n_points_downscale_factor (`int`, *optional*, defaults to `1`): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. Return: `Dict`: A dictionary with the following keys: - **mask** (`PIL.Image`) -- A binary mask of the detected object as a PIL Image of shape `(width, height)` of the original image. Returns a mask filled with zeros if no object is found. - **score** (*optional* `float`) -- Optionally, when the model is capable of estimating a confidence of the "object" described by the label and the mask. """ return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs) def preprocess( self, image, points_per_batch=64, crops_n_layers: int = 0, crop_overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[int] = 1, ): image = load_image(image) target_size = self.image_processor.size["longest_edge"] crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes( image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor ) model_inputs = self.image_processor(images=cropped_images, return_tensors="pt") with self.device_placement(): if self.framework == "pt": inference_context = self.get_inference_context() with inference_context(): model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device) image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values")) model_inputs["image_embeddings"] = image_embeddings n_points = grid_points.shape[1] points_per_batch = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
" "To return all points at once, set points_per_batch to None" ) for i in range(0, n_points, points_per_batch): batched_points = grid_points[:, i : i + points_per_batch, :, :] labels = input_labels[:, i : i + points_per_batch] is_last = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _forward( self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ): input_boxes = model_inputs.pop("input_boxes") is_last = model_inputs.pop("is_last") original_sizes = model_inputs.pop("original_sizes").tolist() reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist() model_outputs = self.model(**model_inputs) # post processing happens here in order to avoid CPU GPU copies of ALL the masks low_resolution_masks = model_outputs["pred_masks"] masks = self.image_processor.post_process_masks( low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False ) iou_scores = model_outputs["iou_scores"] masks, iou_scores, boxes = self.image_processor.filter_masks( masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def postprocess( self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ): all_scores = [] all_masks = [] all_boxes = [] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores")) all_masks.extend(model_output.pop("masks")) all_boxes.append(model_output.pop("boxes")) all_scores = torch.cat(all_scores) all_boxes = torch.cat(all_boxes) output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation( all_masks, all_scores, all_boxes, crops_nms_thresh ) extra = defaultdict(list) for output in model_outputs: for k, v in output.items(): extra[k].append(v) optional = {} if output_rle_mask: optional["rle_mask"] = rle_mask if output_bboxes_mask: optional["bounding_boxes"] = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
13,291
45.313589
119
py
transformers
transformers-main/src/transformers/pipelines/pt_utils.py
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class PipelineDataset(Dataset): def __init__(self, dataset, process, params): self.dataset = dataset self.process = process self.params = params def __len__(self): return len(self.dataset) def __getitem__(self, i): item = self.dataset[i] processed = self.process(item, **self.params) return processed class PipelineIterator(IterableDataset): def __init__(self, loader, infer, params, loader_batch_size=None): """ Roughly equivalent to ``` for item in loader: yield infer(item, **params) ``` Arguments: loader (`torch.utils.data.DataLoader` or any iterator): The iterator that will be used to apply `infer` on. infer (any function): The function to apply of each element of `loader`. params (`dict`): The parameters passed to `infer` along with every item loader_batch_size (`int`, *optional*): If specified, the items of `loader` are supposed to come as batch, and are loader_batched here making it roughly behave as ``` for items in loader: for i in loader_batch_size: item = items[i] yield infer(item, **params) ```""" self.loader = loader self.infer = infer self.params = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether loader_batch_size = None self.loader_batch_size = loader_batch_size # Internal bookkeeping self._loader_batch_index = None self._loader_batch_data = None def __len__(self): return len(self.loader) def __iter__(self): self.iterator = iter(self.loader) return self def loader_batch_item(self): """ Return item located at `loader_batch_index` within the current `loader_batch_data`. """ if isinstance(self._loader_batch_data, torch.Tensor): # Batch data is simple tensor, just fetch the slice result = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) loader_batched = {} for k, element in self._loader_batch_data.items(): if isinstance(element, ModelOutput): # Convert ModelOutput to tuple first element = element.to_tuple() if isinstance(element[0], torch.Tensor): loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element) elif isinstance(element[0], np.ndarray): loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0], torch.Tensor): loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element) elif isinstance(element[0], np.ndarray): loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element) continue if element is None: # This can happen for optional data that get passed around loader_batched[k] = None elif isinstance(element[self._loader_batch_index], torch.Tensor): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers loader_batched[k] = element[self._loader_batch_index].unsqueeze(0) elif isinstance(element[self._loader_batch_index], np.ndarray): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0) else: # This is typically a list, so no need to `unsqueeze`. 
loader_batched[k] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 result = self._loader_batch_data.__class__(loader_batched) self._loader_batch_index += 1 return result def __next__(self): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch item = next(self.iterator) processed = self.infer(item, **self.params) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(processed, torch.Tensor): first_tensor = processed else: key = list(processed.keys())[0] first_tensor = processed[key] if isinstance(first_tensor, list): observed_batch_size = len(first_tensor) else: observed_batch_size = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. self.loader_batch_size = observed_batch_size # Setting internal index to unwrap the batch self._loader_batch_data = processed self._loader_batch_index = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class PipelineChunkIterator(PipelineIterator): def __init__(self, loader, infer, params, loader_batch_size=None): """ Roughly equivalent to ``` for iterator in loader: for item in iterator: yield infer(item, **params) ``` Arguments: loader (`torch.utils.data.DataLoader` or any iterator): The iterator that will be used to apply `infer` on. infer (any function): The function to apply of each element of `loader`. params (`dict`): The parameters passed to `infer` along with every item """ super().__init__(loader, infer, params) def __iter__(self): self.iterator = iter(self.loader) self.subiterator = None return self def __next__(self): if self.subiterator is None: "Subiterator None means we haven't started a `preprocess` iterator. so start it" self.subiterator = self.infer(next(self.iterator), **self.params) try: # Try to return next item processed = next(self.subiterator) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators self.subiterator = self.infer(next(self.iterator), **self.params) processed = next(self.subiterator) return processed class PipelinePackIterator(PipelineIterator): """ Roughly equivalent to ``` packed = [] for item in loader: packed.append(item) if item["is_last"]: yield packed packed = [] ``` but it also handles cases where `item` are batched (meaning it's a dict of Tensor with first dimension > 1. In that case it does ``` packed = [] for batch in loader: # item is batched for item in batch: packed.append(item) if item["is_last"]: yield packed packed = [] ``` Arguments: loader (`torch.utils.data.DataLoader` or any iterator): The iterator that will be used to apply `infer` on. infer (any function): The function to apply of each element of `loader`. 
params (`dict`): The parameters passed to `infer` along with every item loader_batch_size (`int`, *optional*): If specified, the items of `loader` are supposed to come as batch, and are loader_batched here making it roughly behave as ``` for items in loader: for i in loader_batch_size: item = items[i] yield infer(item, **params) ```""" def __iter__(self): self.iterator = iter(self.loader) return self def __next__(self): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. is_last = False accumulator = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: item = self.loader_batch_item() is_last = item.pop("is_last") accumulator.append(item) if is_last: return accumulator while not is_last: processed = self.infer(next(self.iterator), **self.params) if self.loader_batch_size is not None: if isinstance(processed, torch.Tensor): first_tensor = processed else: key = list(processed.keys())[0] first_tensor = processed[key] if isinstance(first_tensor, list): observed_batch_size = len(first_tensor) else: observed_batch_size = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. self.loader_batch_size = observed_batch_size self._loader_batch_data = processed self._loader_batch_index = 0 while self._loader_batch_index < self.loader_batch_size: item = self.loader_batch_item() is_last = item.pop("is_last") accumulator.append(item) if is_last: return accumulator else: item = processed is_last = item.pop("is_last") accumulator.append(item) return accumulator class KeyDataset(Dataset): def __init__(self, dataset: Dataset, key: str): self.dataset = dataset self.key = key def __len__(self): return len(self.dataset) def __getitem__(self, i): return self.dataset[i][self.key] class KeyPairDataset(Dataset): def __init__(self, dataset: Dataset, key1: str, key2: str): self.dataset = dataset self.key1 = key1 self.key2 = key2 def __len__(self): return len(self.dataset) def __getitem__(self, i): return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
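# --- Illustrative sketch (not part of the module) --------------------------------
# A minimal use of `PipelineIterator`: items from a plain Python iterable are fed
# one by one through an `infer` callable together with fixed `params`, exactly as
# described in its docstring. The toy data and the scaling function below are
# assumptions for illustration, not anything a real pipeline would use.
if __name__ == "__main__":
    def toy_infer(item, scale=1):
        # Stand-in for a pipeline's `forward`/`postprocess` step.
        return item * scale

    iterator = PipelineIterator(loader=[1, 2, 3], infer=toy_infer, params={"scale": 10})
    print(list(iterator))  # -> [10, 20, 30]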
12,613
38.54232
118
py
transformers
transformers-main/src/transformers/pipelines/text_classification.py
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def sigmoid(_outputs): return 1.0 / (1.0 + np.exp(-_outputs)) def softmax(_outputs): maxes = np.max(_outputs, axis=-1, keepdims=True) shifted_exp = np.exp(_outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) class ClassificationFunction(ExplicitEnum): SIGMOID = "sigmoid" SOFTMAX = "softmax" NONE = "none" @add_end_docstrings( PIPELINE_INIT_ARGS, r""" return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. """, ) class TextClassificationPipeline(Pipeline): """ Text classification pipeline using any `ModelForSequenceClassification`. See the [sequence classification examples](../task_summary#sequence-classification) for more information. Example: ```python >>> from transformers import pipeline >>> classifier = pipeline(model="distilbert-base-uncased-finetuned-sst-2-english") >>> classifier("This movie is disgustingly good !") [{'label': 'POSITIVE', 'score': 1.0}] >>> classifier("Director tried too much.") [{'label': 'NEGATIVE', 'score': 0.996}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This text classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"sentiment-analysis"` (for classifying sequences according to positive or negative sentiments). If multiple classification labels are available (`model.config.num_labels >= 2`), the pipeline will run a softmax over the results. If there is a single label, the pipeline will run a sigmoid over the result. The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-classification). 
""" return_all_scores = False function_to_apply = ClassificationFunction.NONE def __init__(self, **kwargs): super().__init__(**kwargs) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs): # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" preprocess_params = tokenizer_kwargs postprocess_params = {} if hasattr(self.model.config, "return_all_scores") and return_all_scores is None: return_all_scores = self.model.config.return_all_scores if isinstance(top_k, int) or top_k is None: postprocess_params["top_k"] = top_k postprocess_params["_legacy"] = False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.", UserWarning, ) if return_all_scores: postprocess_params["top_k"] = None else: postprocess_params["top_k"] = 1 if isinstance(function_to_apply, str): function_to_apply = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: postprocess_params["function_to_apply"] = function_to_apply return preprocess_params, {}, postprocess_params def __call__(self, *args, **kwargs): """ Classify the text(s) given as inputs. Args: args (`str` or `List[str]` or `Dict[str]`, or `List[Dict[str]]`): One or several texts to classify. In order to use text pairs for your classification, you can send a dictionary containing `{"text", "text_pair"}` keys, or a list of those. top_k (`int`, *optional*, defaults to `1`): How many results to return. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: If this argument is not specified, then it will apply the following functions according to the number of labels: - If the model has a single label, will apply the sigmoid function on the output. - If the model has several labels, will apply the softmax function on the output. Possible values are: - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. Return: A list or a list of list of `dict`: Each result comes as list of dictionaries with the following keys: - **label** (`str`) -- The label predicted. - **score** (`float`) -- The corresponding probability. If `top_k` is used, one such dictionary is returned per label. """ result = super().__call__(*args, **kwargs) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
_legacy = "top_k" not in kwargs if isinstance(args[0], str) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]: return_tensors = self.framework if isinstance(inputs, dict): return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs) elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs ) elif isinstance(inputs, list): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' ) return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs) def _forward(self, model_inputs): return self.model(**model_inputs) def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True): # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. # Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: function_to_apply = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: function_to_apply = ClassificationFunction.SOFTMAX elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None: function_to_apply = self.model.config.function_to_apply else: function_to_apply = ClassificationFunction.NONE outputs = model_outputs["logits"][0] outputs = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: scores = sigmoid(outputs) elif function_to_apply == ClassificationFunction.SOFTMAX: scores = softmax(outputs) elif function_to_apply == ClassificationFunction.NONE: scores = outputs else: raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}") if top_k == 1 and _legacy: return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()} dict_scores = [ {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores) ] if not _legacy: dict_scores.sort(key=lambda x: x["score"], reverse=True) if top_k is not None: dict_scores = dict_scores[:top_k] return dict_scores
10,043
44.243243
119
py
transformers
transformers-main/src/transformers/pipelines/depth_estimation.py
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> depth_estimator = pipeline(task="depth-estimation", model="Intel/dpt-large")
    >>> output = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    >>> # This is a tensor with the values being the depth expressed in meters for each pixel
    >>> output["predicted_depth"].shape
    torch.Size([1, 384, 384])
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This depth estimation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"depth-estimation"`.

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=depth-estimation).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """
        Predict the depth of the image(s) passed as inputs.

        Args:
            images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing a http link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images, which must then be passed as a
                string. Images in a batch must all be in the same format: all as http links, all as local paths, or
                all as PIL images.

        Return:
            A dictionary or a list of dictionaries containing the result. If the input is a single image, will return
            a dictionary, if the input is a list of several images, will return a list of dictionaries corresponding
            to the images.

            The dictionaries contain the following keys:

            - **predicted_depth** (`torch.Tensor`) -- The predicted depth by the model as a `torch.Tensor`.
            - **depth** (`PIL.Image`) -- The predicted depth by the model as a `PIL.Image`.
""" return super().__call__(images, **kwargs) def _sanitize_parameters(self, **kwargs): return {}, {}, {} def preprocess(self, image): image = load_image(image) self.image_size = image.size model_inputs = self.image_processor(images=image, return_tensors=self.framework) return model_inputs def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) return model_outputs def postprocess(self, model_outputs): predicted_depth = model_outputs.predicted_depth prediction = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False ) output = prediction.squeeze().cpu().numpy() formatted = (output * 255 / np.max(output)).astype("uint8") depth = Image.fromarray(formatted) output_dict = {} output_dict["predicted_depth"] = predicted_depth output_dict["depth"] = depth return output_dict
4,258
38.073394
119
py
transformers
transformers-main/src/transformers/pipelines/fill_mask.py
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch logger = logging.get_logger(__name__) @add_end_docstrings( PIPELINE_INIT_ARGS, r""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """, ) class FillMaskPipeline(Pipeline): """ Masked language modeling prediction pipeline using any `ModelWithLMHead`. See the [masked language modeling examples](../task_summary#masked-language-modeling) for more information. Example: ```python >>> from transformers import pipeline >>> fill_masker = pipeline(model="bert-base-uncased") >>> fill_masker("This is a simple [MASK].") [{'score': 0.042, 'token': 3291, 'token_str': 'problem', 'sequence': 'this is a simple problem.'}, {'score': 0.031, 'token': 3160, 'token_str': 'question', 'sequence': 'this is a simple question.'}, {'score': 0.03, 'token': 8522, 'token_str': 'equation', 'sequence': 'this is a simple equation.'}, {'score': 0.027, 'token': 2028, 'token_str': 'one', 'sequence': 'this is a simple one.'}, {'score': 0.024, 'token': 3627, 'token_str': 'rule', 'sequence': 'this is a simple rule.'}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This mask filling pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"fill-mask"`. The models that this pipeline can use are models that have been trained with a masked language modeling objective, which includes the bi-directional models in the library. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=fill-mask). <Tip> This pipeline only works for inputs with exactly one token masked. Experimental: We added support for multiple masks. The returned values are raw model output, and correspond to disjoint probabilities where one might expect joint probabilities (See [discussion](https://github.com/huggingface/transformers/pull/10222)). 
</Tip>""" def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray: if self.framework == "tf": masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy() elif self.framework == "pt": masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False) else: raise ValueError("Unsupported framework") return masked_index def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray: masked_index = self.get_masked_index(input_ids) numel = np.prod(masked_index.shape) if numel < 1: raise PipelineException( "fill-mask", self.model.base_model_prefix, f"No mask_token ({self.tokenizer.mask_token}) found on the input", ) def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor): if isinstance(model_inputs, list): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["input_ids"][0]) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(input_ids) def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]: if return_tensors is None: return_tensors = self.framework model_inputs = self.tokenizer(inputs, return_tensors=return_tensors) self.ensure_exactly_one_mask_token(model_inputs) return model_inputs def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) model_outputs["input_ids"] = model_inputs["input_ids"] return model_outputs def postprocess(self, model_outputs, top_k=5, target_ids=None): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: top_k = target_ids.shape[0] input_ids = model_outputs["input_ids"][0] outputs = model_outputs["logits"] if self.framework == "tf": masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0] outputs = outputs.numpy() logits = outputs[0, masked_index, :] probs = stable_softmax(logits, axis=-1) if target_ids is not None: probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1)) probs = tf.expand_dims(probs, 0) topk = tf.math.top_k(probs, k=top_k) values, predictions = topk.values.numpy(), topk.indices.numpy() else: masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1) # Fill mask pipeline supports only one ${mask_token} per sample logits = outputs[0, masked_index, :] probs = logits.softmax(dim=-1) if target_ids is not None: probs = probs[..., target_ids] values, predictions = probs.topk(top_k) result = [] single_mask = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())): row = [] for v, p in zip(_values, _predictions): # Copy is important since we're going to modify this array in place tokens = input_ids.numpy().copy() if target_ids is not None: p = target_ids[p].tolist() tokens[masked_index[i]] = p # Filter padding out: tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)] # Originally we skip special tokens to give readable output. 
# For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask) proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence} row.append(proposition) result.append(row) if single_mask: return result[0] return result def get_target_ids(self, targets, top_k=None): if isinstance(targets, str): targets = [targets] try: vocab = self.tokenizer.get_vocab() except Exception: vocab = {} target_ids = [] for target in targets: id_ = vocab.get(target, None) if id_ is None: input_ids = self.tokenizer( target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True, )["input_ids"] if len(input_ids) == 0: logger.warning( f"The specified target token `{target}` does not exist in the model vocabulary. " "We cannot replace it with anything meaningful, ignoring it" ) continue id_ = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f"The specified target token `{target}` does not exist in the model vocabulary. " f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`." ) target_ids.append(id_) target_ids = list(set(target_ids)) if len(target_ids) == 0: raise ValueError("At least one target must be provided when passed.") target_ids = np.array(target_ids) return target_ids def _sanitize_parameters(self, top_k=None, targets=None): postprocess_params = {} if targets is not None: target_ids = self.get_target_ids(targets, top_k) postprocess_params["target_ids"] = target_ids if top_k is not None: postprocess_params["top_k"] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`." ) return {}, {}, postprocess_params def __call__(self, inputs, *args, **kwargs): """ Fill the masked token in the text(s) given as inputs. Args: args (`str` or `List[str]`): One or several texts (or one list of prompts) with masked tokens. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). top_k (`int`, *optional*): When passed, overrides the number of predictions to return. Return: A list or a list of list of `dict`: Each result comes as list of dictionaries with the following keys: - **sequence** (`str`) -- The corresponding input with the mask token prediction. - **score** (`float`) -- The corresponding probability. - **token** (`int`) -- The predicted token id (to replace the masked one). - **token_str** (`str`) -- The predicted token (to replace the masked one). """ outputs = super().__call__(inputs, **kwargs) if isinstance(inputs, list) and len(inputs) == 1: return outputs[0] return outputs
10,691
43
483
py
transformers
transformers-main/src/transformers/pipelines/question_answering.py
import types import warnings from collections.abc import Iterable from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import numpy as np from ..data import SquadExample, SquadFeatures, squad_convert_examples_to_features from ..modelcard import ModelCard from ..tokenization_utils import PreTrainedTokenizer from ..utils import ( PaddingStrategy, add_end_docstrings, is_tf_available, is_tokenizers_available, is_torch_available, logging, ) from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline logger = logging.get_logger(__name__) if TYPE_CHECKING: from ..modeling_tf_utils import TFPreTrainedModel from ..modeling_utils import PreTrainedModel if is_tokenizers_available(): import tokenizers if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING Dataset = None if is_torch_available(): import torch from torch.utils.data import Dataset from ..models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING def decode_spans( start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray ) -> Tuple: """ Take the output of any `ModelForQuestionAnswering` and will generate probabilities for each span to be the actual answer. In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or answer end position being before the starting position. The method supports output the k-best answer through the topk argument. Args: start (`np.ndarray`): Individual start probabilities for each token. end (`np.ndarray`): Individual end probabilities for each token. topk (`int`): Indicates how many possible answer span(s) to extract from the model output. max_answer_len (`int`): Maximum size of the answer to extract from the model's output. undesired_tokens (`np.ndarray`): Mask determining tokens that can be part of the answer """ # Ensure we have batch axis if start.ndim == 1: start = start[None] if end.ndim == 1: end = end[None] # Compute the score of each tuple(start, end) to be the real answer outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1)) # Remove candidate with end < start and end - start > max_answer_len candidates = np.tril(np.triu(outer), max_answer_len - 1) # Inspired by Chen & al. (https://github.com/facebookresearch/DrQA) scores_flat = candidates.flatten() if topk == 1: idx_sort = [np.argmax(scores_flat)] elif len(scores_flat) < topk: idx_sort = np.argsort(-scores_flat) else: idx = np.argpartition(-scores_flat, topk)[0:topk] idx_sort = idx[np.argsort(-scores_flat[idx])] starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:] desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero()) starts = starts[desired_spans] ends = ends[desired_spans] scores = candidates[0, starts, ends] return starts, ends, scores def select_starts_ends( start, end, p_mask, attention_mask, min_null_score=1000000, top_k=1, handle_impossible_answer=False, max_answer_len=15, ): """ Takes the raw output of any `ModelForQuestionAnswering` and first normalizes its outputs and then uses `decode_spans()` to generate probabilities for each span to be the actual answer. Args: start (`np.ndarray`): Individual start logits for each token. end (`np.ndarray`): Individual end logits for each token. 
p_mask (`np.ndarray`): A mask with 1 for values that cannot be in the answer attention_mask (`np.ndarray`): The attention mask generated by the tokenizer min_null_score(`float`): The minimum null (empty) answer score seen so far. topk (`int`): Indicates how many possible answer span(s) to extract from the model output. handle_impossible_answer(`bool`): Whether to allow null (empty) answers max_answer_len (`int`): Maximum size of the answer to extract from the model's output. """ # Ensure padded tokens & question tokens cannot belong to the set of candidate answers. undesired_tokens = np.abs(np.array(p_mask) - 1) if attention_mask is not None: undesired_tokens = undesired_tokens & attention_mask # Generate mask undesired_tokens_mask = undesired_tokens == 0.0 # Make sure non-context indexes in the tensor cannot contribute to the softmax start = np.where(undesired_tokens_mask, -10000.0, start) end = np.where(undesired_tokens_mask, -10000.0, end) # Normalize logits and spans to retrieve the answer start = np.exp(start - start.max(axis=-1, keepdims=True)) start = start / start.sum() end = np.exp(end - end.max(axis=-1, keepdims=True)) end = end / end.sum() if handle_impossible_answer: min_null_score = min(min_null_score, (start[0, 0] * end[0, 0]).item()) # Mask CLS start[0, 0] = end[0, 0] = 0.0 starts, ends, scores = decode_spans(start, end, top_k, max_answer_len, undesired_tokens) return starts, ends, scores, min_null_score class QuestionAnsweringArgumentHandler(ArgumentHandler): """ QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to internal [`SquadExample`]. QuestionAnsweringArgumentHandler manages all the possible to create a [`SquadExample`] from the command-line supplied arguments. """ def normalize(self, item): if isinstance(item, SquadExample): return item elif isinstance(item, dict): for k in ["question", "context"]: if k not in item: raise KeyError("You need to provide a dictionary with keys {question:..., context:...}") elif item[k] is None: raise ValueError(f"`{k}` cannot be None") elif isinstance(item[k], str) and len(item[k]) == 0: raise ValueError(f"`{k}` cannot be empty") return QuestionAnsweringPipeline.create_sample(**item) raise ValueError(f"{item} argument needs to be of type (SquadExample, dict)") def __call__(self, *args, **kwargs): # Detect where the actual inputs are if args is not None and len(args) > 0: if len(args) == 1: inputs = args[0] elif len(args) == 2 and {type(el) for el in args} == {str}: inputs = [{"question": args[0], "context": args[1]}] else: inputs = list(args) # Generic compatibility with sklearn and Keras # Batched data elif "X" in kwargs: inputs = kwargs["X"] elif "data" in kwargs: inputs = kwargs["data"] elif "question" in kwargs and "context" in kwargs: if isinstance(kwargs["question"], list) and isinstance(kwargs["context"], str): inputs = [{"question": Q, "context": kwargs["context"]} for Q in kwargs["question"]] elif isinstance(kwargs["question"], list) and isinstance(kwargs["context"], list): if len(kwargs["question"]) != len(kwargs["context"]): raise ValueError("Questions and contexts don't have the same lengths") inputs = [{"question": Q, "context": C} for Q, C in zip(kwargs["question"], kwargs["context"])] elif isinstance(kwargs["question"], str) and isinstance(kwargs["context"], str): inputs = [{"question": kwargs["question"], "context": kwargs["context"]}] else: raise ValueError("Arguments can't be understood") else: raise ValueError(f"Unknown arguments {kwargs}") # When user is 
sending a generator we need to trust it's a valid example generator_types = (types.GeneratorType, Dataset) if Dataset is not None else (types.GeneratorType,) if isinstance(inputs, generator_types): return inputs # Normalize inputs if isinstance(inputs, dict): inputs = [inputs] elif isinstance(inputs, Iterable): # Copy to avoid overriding arguments inputs = list(inputs) else: raise ValueError(f"Invalid arguments {kwargs}") for i, item in enumerate(inputs): inputs[i] = self.normalize(item) return inputs @add_end_docstrings(PIPELINE_INIT_ARGS) class QuestionAnsweringPipeline(ChunkPipeline): """ Question Answering pipeline using any `ModelForQuestionAnswering`. See the [question answering examples](../task_summary#question-answering) for more information. Example: ```python >>> from transformers import pipeline >>> oracle = pipeline(model="deepset/roberta-base-squad2") >>> oracle(question="Where do I live?", context="My name is Wolfgang and I live in Berlin") {'score': 0.9191, 'start': 34, 'end': 40, 'answer': 'Berlin'} ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"question-answering"`. The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=question-answering). """ default_input_names = "question,context" handle_impossible_answer = False def __init__( self, model: Union["PreTrainedModel", "TFPreTrainedModel"], tokenizer: PreTrainedTokenizer, modelcard: Optional[ModelCard] = None, framework: Optional[str] = None, task: str = "", **kwargs, ): super().__init__( model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, task=task, **kwargs, ) self._args_parser = QuestionAnsweringArgumentHandler() self.check_model_type( TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING if self.framework == "tf" else MODEL_FOR_QUESTION_ANSWERING_MAPPING ) @staticmethod def create_sample( question: Union[str, List[str]], context: Union[str, List[str]] ) -> Union[SquadExample, List[SquadExample]]: """ QuestionAnsweringPipeline leverages the [`SquadExample`] internally. This helper method encapsulate all the logic for converting question(s) and context(s) to [`SquadExample`]. We currently support extractive question answering. Arguments: question (`str` or `List[str]`): The question(s) asked. context (`str` or `List[str]`): The context(s) in which we will look for the answer. Returns: One or a list of [`SquadExample`]: The corresponding [`SquadExample`] grouping question and context. 
""" if isinstance(question, list): return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)] else: return SquadExample(None, question, context, None, None, None) def _sanitize_parameters( self, padding=None, topk=None, top_k=None, doc_stride=None, max_answer_len=None, max_seq_len=None, max_question_len=None, handle_impossible_answer=None, align_to_words=None, **kwargs, ): # Set defaults values preprocess_params = {} if padding is not None: preprocess_params["padding"] = padding if doc_stride is not None: preprocess_params["doc_stride"] = doc_stride if max_question_len is not None: preprocess_params["max_question_len"] = max_question_len if max_seq_len is not None: preprocess_params["max_seq_len"] = max_seq_len postprocess_params = {} if topk is not None and top_k is None: warnings.warn("topk parameter is deprecated, use top_k instead", UserWarning) top_k = topk if top_k is not None: if top_k < 1: raise ValueError(f"top_k parameter should be >= 1 (got {top_k})") postprocess_params["top_k"] = top_k if max_answer_len is not None: if max_answer_len < 1: raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}") if max_answer_len is not None: postprocess_params["max_answer_len"] = max_answer_len if handle_impossible_answer is not None: postprocess_params["handle_impossible_answer"] = handle_impossible_answer if align_to_words is not None: postprocess_params["align_to_words"] = align_to_words return preprocess_params, {}, postprocess_params def __call__(self, *args, **kwargs): """ Answer the question(s) given as inputs by using the context(s). Args: args ([`SquadExample`] or a list of [`SquadExample`]): One or several [`SquadExample`] containing the question and context. X ([`SquadExample`] or a list of [`SquadExample`], *optional*): One or several [`SquadExample`] containing the question and context (will be treated the same way as if passed as the first positional argument). data ([`SquadExample`] or a list of [`SquadExample`], *optional*): One or several [`SquadExample`] containing the question and context (will be treated the same way as if passed as the first positional argument). question (`str` or `List[str]`): One or several question(s) (must be used in conjunction with the `context` argument). context (`str` or `List[str]`): One or several context(s) associated with the question(s) (must be used in conjunction with the `question` argument). topk (`int`, *optional*, defaults to 1): The number of answers to return (will be chosen by order of likelihood). Note that we return less than topk answers if there are not enough options available within the context. doc_stride (`int`, *optional*, defaults to 128): If the context is too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap. max_answer_len (`int`, *optional*, defaults to 15): The maximum length of predicted answers (e.g., only answers with a shorter length are considered). max_seq_len (`int`, *optional*, defaults to 384): The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using `doc_stride` as overlap) if needed. max_question_len (`int`, *optional*, defaults to 64): The maximum length of the question after tokenization. It will be truncated if needed. handle_impossible_answer (`bool`, *optional*, defaults to `False`): Whether or not we accept impossible as an answer. 
align_to_words (`bool`, *optional*, defaults to `True`): Attempts to align the answer to real words. Improves quality on space separated langages. Might hurt on non-space-separated languages (like Japanese or Chinese) Return: A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: - **score** (`float`) -- The probability associated to the answer. - **start** (`int`) -- The character start index of the answer (in the tokenized version of the input). - **end** (`int`) -- The character end index of the answer (in the tokenized version of the input). - **answer** (`str`) -- The answer to the question. """ # Convert inputs to features examples = self._args_parser(*args, **kwargs) if isinstance(examples, (list, tuple)) and len(examples) == 1: return super().__call__(examples[0], **kwargs) return super().__call__(examples, **kwargs) def preprocess(self, example, padding="do_not_pad", doc_stride=None, max_question_len=64, max_seq_len=None): # XXX: This is specal, args_parser will not handle anything generator or dataset like # For those we expect user to send a simple valid example either directly as a SquadExample or simple dict. # So we still need a little sanitation here. if isinstance(example, dict): example = SquadExample(None, example["question"], example["context"], None, None, None) if max_seq_len is None: max_seq_len = min(self.tokenizer.model_max_length, 384) if doc_stride is None: doc_stride = min(max_seq_len // 2, 128) if doc_stride > max_seq_len: raise ValueError(f"`doc_stride` ({doc_stride}) is larger than `max_seq_len` ({max_seq_len})") if not self.tokenizer.is_fast: features = squad_convert_examples_to_features( examples=[example], tokenizer=self.tokenizer, max_seq_length=max_seq_len, doc_stride=doc_stride, max_query_length=max_question_len, padding_strategy=PaddingStrategy.MAX_LENGTH, is_training=False, tqdm_enabled=False, ) else: # Define the side we want to truncate / pad and the text/pair sorting question_first = self.tokenizer.padding_side == "right" encoded_inputs = self.tokenizer( text=example.question_text if question_first else example.context_text, text_pair=example.context_text if question_first else example.question_text, padding=padding, truncation="only_second" if question_first else "only_first", max_length=max_seq_len, stride=doc_stride, return_token_type_ids=True, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, ) # When the input is too long, it's converted in a batch of inputs with overflowing tokens # and a stride of overlap between the inputs. If a batch of inputs is given, a special output # "overflow_to_sample_mapping" indicate which member of the encoded batch belong to which original batch sample. # Here we tokenize examples one-by-one so we don't need to use "overflow_to_sample_mapping". # "num_span" is the number of output samples generated from the overflowing tokens. 
num_spans = len(encoded_inputs["input_ids"]) # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens) p_mask = [ [tok != 1 if question_first else 0 for tok in encoded_inputs.sequence_ids(span_id)] for span_id in range(num_spans) ] features = [] for span_idx in range(num_spans): input_ids_span_idx = encoded_inputs["input_ids"][span_idx] attention_mask_span_idx = ( encoded_inputs["attention_mask"][span_idx] if "attention_mask" in encoded_inputs else None ) token_type_ids_span_idx = ( encoded_inputs["token_type_ids"][span_idx] if "token_type_ids" in encoded_inputs else None ) # keep the cls_token unmasked (some models use it to indicate unanswerable questions) if self.tokenizer.cls_token_id is not None: cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0] for cls_index in cls_indices: p_mask[span_idx][cls_index] = 0 submask = p_mask[span_idx] features.append( SquadFeatures( input_ids=input_ids_span_idx, attention_mask=attention_mask_span_idx, token_type_ids=token_type_ids_span_idx, p_mask=submask, encoding=encoded_inputs[span_idx], # We don't use the rest of the values - and actually # for Fast tokenizer we could totally avoid using SquadFeatures and SquadExample cls_index=None, token_to_orig_map={}, example_index=0, unique_id=0, paragraph_len=0, token_is_max_context=0, tokens=[], start_position=0, end_position=0, is_impossible=False, qas_id=None, ) ) for i, feature in enumerate(features): fw_args = {} others = {} model_input_names = self.tokenizer.model_input_names + ["p_mask", "token_type_ids"] for k, v in feature.__dict__.items(): if k in model_input_names: if self.framework == "tf": tensor = tf.constant(v) if tensor.dtype == tf.int64: tensor = tf.cast(tensor, tf.int32) fw_args[k] = tf.expand_dims(tensor, 0) elif self.framework == "pt": tensor = torch.tensor(v) if tensor.dtype == torch.int32: tensor = tensor.long() fw_args[k] = tensor.unsqueeze(0) else: others[k] = v is_last = i == len(features) - 1 yield {"example": example, "is_last": is_last, **fw_args, **others} def _forward(self, inputs): example = inputs["example"] model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names} output = self.model(**model_inputs) if isinstance(output, dict): return {"start": output["start_logits"], "end": output["end_logits"], "example": example, **inputs} else: start, end = output[:2] return {"start": start, "end": end, "example": example, **inputs} def postprocess( self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, align_to_words=True, ): min_null_score = 1000000 # large and positive answers = [] for output in model_outputs: start_ = output["start"] end_ = output["end"] example = output["example"] p_mask = output["p_mask"] attention_mask = ( output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None ) starts, ends, scores, min_null_score = select_starts_ends( start_, end_, p_mask, attention_mask, min_null_score, top_k, handle_impossible_answer, max_answer_len ) if not self.tokenizer.is_fast: char_to_word = np.array(example.char_to_word_offset) # Convert the answer (tokens) back to the original text # Score: score from the model # Start: Index of the first character of the answer in the context string # End: Index of the character following the last character of the answer in the context string # Answer: Plain text of the answer for s, e, score in 
zip(starts, ends, scores): token_to_orig_map = output["token_to_orig_map"] answers.append( { "score": score.item(), "start": np.where(char_to_word == token_to_orig_map[s])[0][0].item(), "end": np.where(char_to_word == token_to_orig_map[e])[0][-1].item(), "answer": " ".join(example.doc_tokens[token_to_orig_map[s] : token_to_orig_map[e] + 1]), } ) else: # Convert the answer (tokens) back to the original text # Score: score from the model # Start: Index of the first character of the answer in the context string # End: Index of the character following the last character of the answer in the context string # Answer: Plain text of the answer question_first = bool(self.tokenizer.padding_side == "right") enc = output["encoding"] # Encoding was *not* padded, input_ids *might*. # It doesn't make a difference unless we're padding on # the left hand side, since now we have different offsets # everywhere. if self.tokenizer.padding_side == "left": offset = (output["input_ids"] == self.tokenizer.pad_token_id).numpy().sum() else: offset = 0 # Sometimes the max probability token is in the middle of a word so: # - we start by finding the right word containing the token with `token_to_word` # - then we convert this word in a character span with `word_to_chars` sequence_index = 1 if question_first else 0 for s, e, score in zip(starts, ends, scores): s = s - offset e = e - offset start_index, end_index = self.get_indices(enc, s, e, sequence_index, align_to_words) answers.append( { "score": score.item(), "start": start_index, "end": end_index, "answer": example.context_text[start_index:end_index], } ) if handle_impossible_answer: answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""}) answers = sorted(answers, key=lambda x: x["score"], reverse=True)[:top_k] if len(answers) == 1: return answers[0] return answers def get_indices( self, enc: "tokenizers.Encoding", s: int, e: int, sequence_index: int, align_to_words: bool ) -> Tuple[int, int]: if align_to_words: try: start_word = enc.token_to_word(s) end_word = enc.token_to_word(e) start_index = enc.word_to_chars(start_word, sequence_index=sequence_index)[0] end_index = enc.word_to_chars(end_word, sequence_index=sequence_index)[1] except Exception: # Some tokenizers don't really handle words. Keep to offsets then. start_index = enc.offsets[s][0] end_index = enc.offsets[e][1] else: start_index = enc.offsets[s][0] end_index = enc.offsets[e][1] return start_index, end_index def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]: """ When decoding from token probabilities, this method maps token indexes to actual word in the initial context. Args: text (`str`): The actual context to extract the answer from. start (`int`): The answer starting token index. end (`int`): The answer end token index. Returns: Dictionary like `{'answer': str, 'start': int, 'end': int}` """ words = [] token_idx = char_start_idx = char_end_idx = chars_idx = 0 for i, word in enumerate(text.split(" ")): token = self.tokenizer.tokenize(word) # Append words if they are in the span if start <= token_idx <= end: if token_idx == start: char_start_idx = chars_idx if token_idx == end: char_end_idx = chars_idx + len(word) words += [word] # Stop if we went over the end of the answer if token_idx > end: break # Append the subtokenization length to the running index token_idx += len(token) chars_idx += len(word) + 1 # Join text with spaces return { "answer": " ".join(words), "start": max(0, char_start_idx), "end": min(len(text), char_end_idx), }
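A standalone sketch of the span-scoring step implemented by `decode_spans` above, run on toy probabilities so the `triu`/`tril` candidate masking is easy to follow. It assumes the module-level function is importable from the installed transformers version; the numbers are illustrative only.

```python
import numpy as np

from transformers.pipelines.question_answering import decode_spans

start = np.array([0.05, 0.70, 0.20, 0.05])  # per-token start probabilities
end = np.array([0.05, 0.10, 0.80, 0.05])    # per-token end probabilities
undesired_tokens = np.array([0, 1, 1, 0])   # 1 = token may be part of the answer

# topk=1, max_answer_len=2
starts, ends, scores = decode_spans(start, end, 1, 2, undesired_tokens)
# Best span is tokens [1, 2]: score = start[1] * end[2] = 0.56
print(starts, ends, scores)
```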
29,474
43.323308
124
py
transformers
transformers-main/src/transformers/pipelines/document_question_answering.py
# Copyright 2022 The Impira Team and the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from typing import List, Optional, Tuple, Union import numpy as np from ..utils import ( ExplicitEnum, add_end_docstrings, is_pytesseract_available, is_torch_available, is_vision_available, logging, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline from .question_answering import select_starts_ends if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING TESSERACT_LOADED = False if is_pytesseract_available(): TESSERACT_LOADED = True import pytesseract logger = logging.get_logger(__name__) # normalize_bbox() and apply_tesseract() are derived from apply_tesseract in models/layoutlmv3/feature_extraction_layoutlmv3.py. # However, because the pipeline may evolve from what layoutlmv3 currently does, it's copied (vs. imported) to avoid creating an # unnecessary dependency. def normalize_box(box, width, height): return [ int(1000 * (box[0] / width)), int(1000 * (box[1] / height)), int(1000 * (box[2] / width)), int(1000 * (box[3] / height)), ] def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: Optional[str]): """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.""" # apply OCR data = pytesseract.image_to_data(image, lang=lang, output_type="dict", config=tesseract_config) words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format actual_boxes = [] for x, y, w, h in zip(left, top, width, height): actual_box = [x, y, x + w, y + h] actual_boxes.append(actual_box) image_width, image_height = image.size # finally, normalize the bounding boxes normalized_boxes = [] for box in actual_boxes: normalized_boxes.append(normalize_box(box, image_width, image_height)) if len(words) != len(normalized_boxes): raise ValueError("Not as many words as there are bounding boxes") return words, normalized_boxes class ModelType(ExplicitEnum): LayoutLM = "layoutlm" LayoutLMv2andv3 = "layoutlmv2andv3" VisionEncoderDecoder = "vision_encoder_decoder" @add_end_docstrings(PIPELINE_INIT_ARGS) class DocumentQuestionAnsweringPipeline(ChunkPipeline): # TODO: Update task_summary docs to include an example with document QA and then update 
the first sentence """ Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. The inputs/outputs are similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR'd words/boxes) as input instead of text context. Example: ```python >>> from transformers import pipeline >>> document_qa = pipeline(model="impira/layoutlm-document-qa") >>> document_qa( ... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", ... question="What is the invoice number?", ... ) [{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This document question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"document-question-answering"`. The models that this pipeline can use are models that have been fine-tuned on a document question answering task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=document-question-answering). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.tokenizer is not None and not self.tokenizer.__class__.__name__.endswith("Fast"): raise ValueError( "`DocumentQuestionAnsweringPipeline` requires a fast tokenizer, but a slow tokenizer " f"(`{self.tokenizer.__class__.__name__}`) is provided." ) if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig": self.model_type = ModelType.VisionEncoderDecoder if self.model.config.encoder.model_type != "donut-swin": raise ValueError("Currently, the only supported VisionEncoderDecoder model is Donut") else: self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING) if self.model.config.__class__.__name__ == "LayoutLMConfig": self.model_type = ModelType.LayoutLM else: self.model_type = ModelType.LayoutLMv2andv3 def _sanitize_parameters( self, padding=None, doc_stride=None, max_question_len=None, lang: Optional[str] = None, tesseract_config: Optional[str] = None, max_answer_len=None, max_seq_len=None, top_k=None, handle_impossible_answer=None, **kwargs, ): preprocess_params, postprocess_params = {}, {} if padding is not None: preprocess_params["padding"] = padding if doc_stride is not None: preprocess_params["doc_stride"] = doc_stride if max_question_len is not None: preprocess_params["max_question_len"] = max_question_len if max_seq_len is not None: preprocess_params["max_seq_len"] = max_seq_len if lang is not None: preprocess_params["lang"] = lang if tesseract_config is not None: preprocess_params["tesseract_config"] = tesseract_config if top_k is not None: if top_k < 1: raise ValueError(f"top_k parameter should be >= 1 (got {top_k})") postprocess_params["top_k"] = top_k if max_answer_len is not None: if max_answer_len < 1: raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}") postprocess_params["max_answer_len"] = max_answer_len if handle_impossible_answer is not None: postprocess_params["handle_impossible_answer"] = handle_impossible_answer return preprocess_params, {}, postprocess_params def __call__( self, image: Union["Image.Image", str], question: Optional[str] = None, word_boxes: Tuple[str, List[float]] = None, **kwargs, ): """ Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an optional list of (word, box) tuples which represent the text in the document. 
If the `word_boxes` are not provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for LayoutLM-like models which require them as input. For Donut, no OCR is run. You can invoke the pipeline several ways: - `pipeline(image=image, question=question)` - `pipeline(image=image, question=question, word_boxes=word_boxes)` - `pipeline([{"image": image, "question": question}])` - `pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])` Args: image (`str` or `PIL.Image`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. If given a single image, it can be broadcasted to multiple questions. question (`str`): A question to ask of the document. word_boxes (`List[str, Tuple[float, float, float, float]]`, *optional*): A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the pipeline will use these words and boxes instead of running OCR on the image to derive them for models that need them (e.g. LayoutLM). This allows you to reuse OCR'd results across many invocations of the pipeline without having to re-run it each time. top_k (`int`, *optional*, defaults to 1): The number of answers to return (will be chosen by order of likelihood). Note that we return less than top_k answers if there are not enough options available within the context. doc_stride (`int`, *optional*, defaults to 128): If the words in the document are too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap. max_answer_len (`int`, *optional*, defaults to 15): The maximum length of predicted answers (e.g., only answers with a shorter length are considered). max_seq_len (`int`, *optional*, defaults to 384): The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using `doc_stride` as overlap) if needed. max_question_len (`int`, *optional*, defaults to 64): The maximum length of the question after tokenization. It will be truncated if needed. handle_impossible_answer (`bool`, *optional*, defaults to `False`): Whether or not we accept impossible as an answer. lang (`str`, *optional*): Language to use while running OCR. Defaults to english. tesseract_config (`str`, *optional*): Additional flags to pass to tesseract while running OCR. Return: A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: - **score** (`float`) -- The probability associated to the answer. - **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided `word_boxes`). - **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided `word_boxes`). - **answer** (`str`) -- The answer to the question. 
- **words** (`list[int]`) -- The index of each word/box pair that is in the answer """ if isinstance(question, str): inputs = {"question": question, "image": image} if word_boxes is not None: inputs["word_boxes"] = word_boxes else: inputs = image return super().__call__(inputs, **kwargs) def preprocess( self, input, padding="do_not_pad", doc_stride=None, max_seq_len=None, word_boxes: Tuple[str, List[float]] = None, lang=None, tesseract_config="", ): # NOTE: This code mirrors the code in question answering and will be implemented in a follow up PR # to support documents with enough tokens that overflow the model's window if max_seq_len is None: max_seq_len = self.tokenizer.model_max_length if doc_stride is None: doc_stride = min(max_seq_len // 2, 256) image = None image_features = {} if input.get("image", None) is not None: image = load_image(input["image"]) if self.image_processor is not None: image_features.update(self.image_processor(images=image, return_tensors=self.framework)) elif self.feature_extractor is not None: image_features.update(self.feature_extractor(images=image, return_tensors=self.framework)) elif self.model_type == ModelType.VisionEncoderDecoder: raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor") words, boxes = None, None if not self.model_type == ModelType.VisionEncoderDecoder: if "word_boxes" in input: words = [x[0] for x in input["word_boxes"]] boxes = [x[1] for x in input["word_boxes"]] elif "words" in image_features and "boxes" in image_features: words = image_features.pop("words")[0] boxes = image_features.pop("boxes")[0] elif image is not None: if not TESSERACT_LOADED: raise ValueError( "If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract," " but pytesseract is not available" ) if TESSERACT_LOADED: words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config) else: raise ValueError( "You must provide an image or word_boxes. 
If you provide an image, the pipeline will automatically" " run OCR to derive words and boxes" ) if self.tokenizer.padding_side != "right": raise ValueError( "Document question answering only supports tokenizers whose padding side is 'right', not" f" {self.tokenizer.padding_side}" ) if self.model_type == ModelType.VisionEncoderDecoder: task_prompt = f'<s_docvqa><s_question>{input["question"]}</s_question><s_answer>' # Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py encoding = { "inputs": image_features["pixel_values"], "decoder_input_ids": self.tokenizer( task_prompt, add_special_tokens=False, return_tensors=self.framework ).input_ids, "return_dict_in_generate": True, } yield { **encoding, "p_mask": None, "word_ids": None, "words": None, "output_attentions": True, "is_last": True, } else: tokenizer_kwargs = {} if self.model_type == ModelType.LayoutLM: tokenizer_kwargs["text"] = input["question"].split() tokenizer_kwargs["text_pair"] = words tokenizer_kwargs["is_split_into_words"] = True else: tokenizer_kwargs["text"] = [input["question"]] tokenizer_kwargs["text_pair"] = [words] tokenizer_kwargs["boxes"] = [boxes] encoding = self.tokenizer( padding=padding, max_length=max_seq_len, stride=doc_stride, return_token_type_ids=True, truncation="only_second", return_overflowing_tokens=True, **tokenizer_kwargs, ) # TODO: check why slower `LayoutLMTokenizer` and `LayoutLMv2Tokenizer` don't have this key in outputs # FIXME: ydshieh and/or Narsil encoding.pop("overflow_to_sample_mapping", None) # We do not use this num_spans = len(encoding["input_ids"]) # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens) # This logic mirrors the logic in the question_answering pipeline p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)] for span_idx in range(num_spans): if self.framework == "pt": span_encoding = {k: torch.tensor(v[span_idx : span_idx + 1]) for (k, v) in encoding.items()} if "pixel_values" in image_features: span_encoding["image"] = image_features["pixel_values"] else: raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") input_ids_span_idx = encoding["input_ids"][span_idx] # keep the cls_token unmasked (some models use it to indicate unanswerable questions) if self.tokenizer.cls_token_id is not None: cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0] for cls_index in cls_indices: p_mask[span_idx][cls_index] = 0 # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000] # for SEP tokens, and the word's bounding box for words in the original document. 
if "boxes" not in tokenizer_kwargs: bbox = [] for input_id, sequence_id, word_id in zip( encoding.input_ids[span_idx], encoding.sequence_ids(span_idx), encoding.word_ids(span_idx), ): if sequence_id == 1: bbox.append(boxes[word_id]) elif input_id == self.tokenizer.sep_token_id: bbox.append([1000] * 4) else: bbox.append([0] * 4) if self.framework == "pt": span_encoding["bbox"] = torch.tensor(bbox).unsqueeze(0) elif self.framework == "tf": raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") yield { **span_encoding, "p_mask": p_mask[span_idx], "word_ids": encoding.word_ids(span_idx), "words": words, "is_last": span_idx == num_spans - 1, } def _forward(self, model_inputs): p_mask = model_inputs.pop("p_mask", None) word_ids = model_inputs.pop("word_ids", None) words = model_inputs.pop("words", None) is_last = model_inputs.pop("is_last", False) if self.model_type == ModelType.VisionEncoderDecoder: model_outputs = self.model.generate(**model_inputs) else: model_outputs = self.model(**model_inputs) model_outputs = dict(model_outputs.items()) model_outputs["p_mask"] = p_mask model_outputs["word_ids"] = word_ids model_outputs["words"] = words model_outputs["attention_mask"] = model_inputs.get("attention_mask", None) model_outputs["is_last"] = is_last return model_outputs def postprocess(self, model_outputs, top_k=1, **kwargs): if self.model_type == ModelType.VisionEncoderDecoder: answers = [self.postprocess_encoder_decoder_single(o) for o in model_outputs] else: answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs) answers = sorted(answers, key=lambda x: x.get("score", 0), reverse=True)[:top_k] return answers def postprocess_encoder_decoder_single(self, model_outputs, **kwargs): sequence = self.tokenizer.batch_decode(model_outputs["sequences"])[0] # TODO: A lot of this logic is specific to Donut and should probably be handled in the tokenizer # (see https://github.com/huggingface/transformers/pull/18414/files#r961747408 for more context). sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "") sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token ret = { "answer": None, } answer = re.search(r"<s_answer>(.*)</s_answer>", sequence) if answer is not None: ret["answer"] = answer.group(1).strip() return ret def postprocess_extractive_qa( self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, **kwargs ): min_null_score = 1000000 # large and positive answers = [] for output in model_outputs: words = output["words"] starts, ends, scores, min_null_score = select_starts_ends( start=output["start_logits"], end=output["end_logits"], p_mask=output["p_mask"], attention_mask=output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None, min_null_score=min_null_score, top_k=top_k, handle_impossible_answer=handle_impossible_answer, max_answer_len=max_answer_len, ) word_ids = output["word_ids"] for start, end, score in zip(starts, ends, scores): word_start, word_end = word_ids[start], word_ids[end] if word_start is not None and word_end is not None: answers.append( { "score": float(score), "answer": " ".join(words[word_start : word_end + 1]), "start": word_start, "end": word_end, } ) if handle_impossible_answer: answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0}) return answers
23,079
45.532258
128
py
transformers
transformers-main/src/transformers/pipelines/audio_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess from typing import Union import numpy as np import requests from ..utils import add_end_docstrings, is_torch_available, is_torchaudio_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING logger = logging.get_logger(__name__) def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array: """ Helper function to read an audio file through ffmpeg. """ ar = f"{sampling_rate}" ac = "1" format_for_conversion = "f32le" ffmpeg_command = [ "ffmpeg", "-i", "pipe:0", "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-hide_banner", "-loglevel", "quiet", "pipe:1", ] try: ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) except FileNotFoundError: raise ValueError("ffmpeg was not found but is required to load audio files from filename") output_stream = ffmpeg_process.communicate(bpayload) out_bytes = output_stream[0] audio = np.frombuffer(out_bytes, np.float32) if audio.shape[0] == 0: raise ValueError("Malformed soundfile") return audio @add_end_docstrings(PIPELINE_INIT_ARGS) class AudioClassificationPipeline(Pipeline): """ Audio classification pipeline using any `AutoModelForAudioClassification`. This pipeline predicts the class of a raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio formats. Example: ```python >>> from transformers import pipeline >>> classifier = pipeline(model="superb/wav2vec2-base-superb-ks") >>> classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac") [{'score': 0.997, 'label': '_unknown_'}, {'score': 0.002, 'label': 'left'}, {'score': 0.0, 'label': 'yes'}, {'score': 0.0, 'label': 'down'}, {'score': 0.0, 'label': 'stop'}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"audio-classification"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=audio-classification). """ def __init__(self, *args, **kwargs): # Default, might be overriden by the model.config. kwargs["top_k"] = 5 super().__init__(*args, **kwargs) if self.framework != "pt": raise ValueError(f"The {self.__class__} is only available in PyTorch.") self.check_model_type(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING) def __call__( self, inputs: Union[np.ndarray, bytes, str], **kwargs, ): """ Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more information. Args: inputs (`np.ndarray` or `bytes` or `str` or `dict`): The inputs is either : - `str` that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system. 
- `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the same way. - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`) Raw audio at the correct sampling rate (no further check will be done) - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this pipeline do the resampling. The dict must be either be in the format `{"sampling_rate": int, "raw": np.array}`, or `{"sampling_rate": int, "array": np.array}`, where the key `"raw"` or `"array"` is used to denote the raw audio waveform. top_k (`int`, *optional*, defaults to None): The number of top labels that will be returned by the pipeline. If the provided number is `None` or higher than the number of labels available in the model configuration, it will default to the number of labels. Return: A list of `dict` with the following keys: - **label** (`str`) -- The label predicted. - **score** (`float`) -- The corresponding probability. """ return super().__call__(inputs, **kwargs) def _sanitize_parameters(self, top_k=None, **kwargs): # No parameters on this pipeline right now postprocess_params = {} if top_k is not None: if top_k > self.model.config.num_labels: top_k = self.model.config.num_labels postprocess_params["top_k"] = top_k return {}, {}, postprocess_params def preprocess(self, inputs): if isinstance(inputs, str): if inputs.startswith("http://") or inputs.startswith("https://"): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png inputs = requests.get(inputs).content else: with open(inputs, "rb") as f: inputs = f.read() if isinstance(inputs, bytes): inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate) if isinstance(inputs, dict): # Accepting `"array"` which is the key defined in `datasets` for # better integration if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)): raise ValueError( "When passing a dictionary to AudioClassificationPipeline, the dict needs to contain a " '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, ' "containing the sampling_rate associated with that array" ) _inputs = inputs.pop("raw", None) if _inputs is None: # Remove path which will not be used from `datasets`. inputs.pop("path", None) _inputs = inputs.pop("array", None) in_sampling_rate = inputs.pop("sampling_rate") inputs = _inputs if in_sampling_rate != self.feature_extractor.sampling_rate: import torch if is_torchaudio_available(): from torchaudio import functional as F else: raise ImportError( "torchaudio is required to resample audio samples in AudioClassificationPipeline. " "The torchaudio package can be installed through: `pip install torchaudio`." 
) inputs = F.resample( torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate ).numpy() if not isinstance(inputs, np.ndarray): raise ValueError("We expect a numpy ndarray as input") if len(inputs.shape) != 1: raise ValueError("We expect a single channel audio input for AudioClassificationPipeline") processed = self.feature_extractor( inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" ) return processed def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) return model_outputs def postprocess(self, model_outputs, top_k=5): probs = model_outputs.logits[0].softmax(-1) scores, ids = probs.topk(top_k) scores = scores.tolist() ids = ids.tolist() labels = [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)] return labels
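A brief usage sketch for the audio-classification pipeline above, passing raw audio in the dictionary form documented in `__call__` so the pipeline resamples it itself (which requires `torchaudio`, as in `preprocess`). The checkpoint is the one from the docstring; the waveform here is synthetic noise used only for illustration.

```python
import numpy as np

from transformers import pipeline

classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-ks")

# One second of synthetic mono audio at 8 kHz; the pipeline will resample it to
# the feature extractor's sampling rate before classification.
waveform = np.random.uniform(-1.0, 1.0, size=8000).astype(np.float32)

print(classifier({"sampling_rate": 8000, "raw": waveform}, top_k=3))
# Returns a list of {"score": float, "label": str} dicts, best score first.
```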
8,769
39.601852
177
py
transformers
transformers-main/src/transformers/pipelines/text2text_generation.py
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING logger = logging.get_logger(__name__) class ReturnType(enum.Enum): TENSORS = 0 TEXT = 1 @add_end_docstrings(PIPELINE_INIT_ARGS) class Text2TextGenerationPipeline(Pipeline): """ Pipeline for text to text generation using seq2seq models. Example: ```python >>> from transformers import pipeline >>> generator = pipeline(model="mrm8488/t5-base-finetuned-question-generation-ap") >>> generator( ... "answer: Manuel context: Manuel has created RuPERTa-base with the support of HF-Transformers and Google" ... ) [{'generated_text': 'question: Who created the RuPERTa-base?'}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This Text2TextGenerationPipeline pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"text2text-generation"`. The models that this pipeline can use are models that have been fine-tuned on a translation task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text2text-generation). For a list of available parameters, see the [following documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate) Usage: ```python text2text_generator = pipeline("text2text-generation") text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything") ```""" # Used in the return key of the pipeline. return_name = "generated" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def _sanitize_parameters( self, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs, ): preprocess_params = {} if truncation is not None: preprocess_params["truncation"] = truncation forward_params = generate_kwargs postprocess_params = {} if return_tensors is not None and return_type is None: return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: postprocess_params["return_type"] = return_type if clean_up_tokenization_spaces is not None: postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces if stop_sequence is not None: stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False) if len(stop_sequence_ids) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." ) generate_kwargs["eos_token_id"] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def check_inputs(self, input_length: int, min_length: int, max_length: int): """ Checks whether there might be something wrong with given input with regard to the model. 
""" return True def _parse_and_tokenize(self, *args, truncation): prefix = self.model.config.prefix if self.model.config.prefix is not None else "" if isinstance(args[0], list): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input") args = ([prefix + arg for arg in args[0]],) padding = True elif isinstance(args[0], str): args = (prefix + args[0],) padding = False else: raise ValueError( f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`" ) inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__(self, *args, **kwargs): r""" Generate the output text(s) using text(s) given as inputs. Args: args (`str` or `List[str]`): Input text for the encoder. return_tensors (`bool`, *optional*, defaults to `False`): Whether or not to include the tensors of predictions (as token indices) in the outputs. return_text (`bool`, *optional*, defaults to `True`): Whether or not to include the decoded texts in the outputs. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the potential extra spaces in the text output. truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`): The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE` (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's max_length instead of throwing an error down the line. generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework [here](./model#generative-models)). Return: A list or a list of list of `dict`: Each result comes as a dictionary with the following keys: - **generated_text** (`str`, present when `return_text=True`) -- The generated text. - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token ids of the generated text. 
""" result = super().__call__(*args, **kwargs) if ( isinstance(args[0], list) and all(isinstance(el, str) for el in args[0]) and all(len(res) == 1 for res in result) ): return [res[0] for res in result] return result def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs): inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs) return inputs def _forward(self, model_inputs, **generate_kwargs): if self.framework == "pt": in_b, input_length = model_inputs["input_ids"].shape elif self.framework == "tf": in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy() generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length) generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length) self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"]) output_ids = self.model.generate(**model_inputs, **generate_kwargs) out_b = output_ids.shape[0] if self.framework == "pt": output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:]) elif self.framework == "tf": output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:])) return {"output_ids": output_ids} def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False): records = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: record = {f"{self.return_name}_token_ids": output_ids} elif return_type == ReturnType.TEXT: record = { f"{self.return_name}_text": self.tokenizer.decode( output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) } records.append(record) return records @add_end_docstrings(PIPELINE_INIT_ARGS) class SummarizationPipeline(Text2TextGenerationPipeline): """ Summarize news articles and other documents. This summarizing pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"summarization"`. The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is currently, '*bart-large-cnn*', '*t5-small*', '*t5-base*', '*t5-large*', '*t5-3b*', '*t5-11b*'. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=summarization). For a list of available parameters, see the [following documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate) Usage: ```python # use bart in pytorch summarizer = pipeline("summarization") summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20) # use t5 in tf summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf") summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20) ```""" # Used in the return key of the pipeline. return_name = "summary" def __call__(self, *args, **kwargs): r""" Summarize the text(s) given as inputs. Args: documents (*str* or `List[str]`): One or several articles (or one list of articles) to summarize. return_text (`bool`, *optional*, defaults to `True`): Whether or not to include the decoded texts in the outputs return_tensors (`bool`, *optional*, defaults to `False`): Whether or not to include the tensors of predictions (as token indices) in the outputs. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the potential extra spaces in the text output. 
generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework [here](./model#generative-models)). Return: A list or a list of list of `dict`: Each result comes as a dictionary with the following keys: - **summary_text** (`str`, present when `return_text=True`) -- The summary of the corresponding input. - **summary_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token ids of the summary. """ return super().__call__(*args, **kwargs) def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool: """ Checks whether there might be something wrong with given input with regard to the model. """ if max_length < min_length: logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.") if input_length < max_length: logger.warning( f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is " "a summarization task, where outputs shorter than the input are typically wanted, you might " f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})" ) @add_end_docstrings(PIPELINE_INIT_ARGS) class TranslationPipeline(Text2TextGenerationPipeline): """ Translates from one language to another. This translation pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"translation_xx_to_yy"`. The models that this pipeline can use are models that have been fine-tuned on a translation task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=translation). For a list of available parameters, see the [following documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate) Usage: ```python en_fr_translator = pipeline("translation_en_to_fr") en_fr_translator("How old are you?") ```""" # Used in the return key of the pipeline. return_name = "translation" def check_inputs(self, input_length: int, min_length: int, max_length: int): if input_length > 0.9 * max_length: logger.warning( f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider " "increasing your max_length manually, e.g. translator('...', max_length=400)" ) return True def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None): if getattr(self.tokenizer, "_build_translation_inputs", None): return self.tokenizer._build_translation_inputs( *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang ) else: return super()._parse_and_tokenize(*args, truncation=truncation) def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs): preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs) if src_lang is not None: preprocess_params["src_lang"] = src_lang if tgt_lang is not None: preprocess_params["tgt_lang"] = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. task = kwargs.get("task", self.task) items = task.split("_") if task and len(items) == 4: # translation, XX, to YY preprocess_params["src_lang"] = items[1] preprocess_params["tgt_lang"] = items[3] return preprocess_params, forward_params, postprocess_params def __call__(self, *args, **kwargs): r""" Translate the text(s) given as inputs. 
Args: args (`str` or `List[str]`): Texts to be translated. return_tensors (`bool`, *optional*, defaults to `False`): Whether or not to include the tensors of predictions (as token indices) in the outputs. return_text (`bool`, *optional*, defaults to `True`): Whether or not to include the decoded texts in the outputs. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the potential extra spaces in the text output. src_lang (`str`, *optional*): The language of the input. Might be required for multilingual models. Will not have any effect for single pair translation models tgt_lang (`str`, *optional*): The language of the desired output. Might be required for multilingual models. Will not have any effect for single pair translation models generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework [here](./model#generative-models)). Return: A list or a list of list of `dict`: Each result comes as a dictionary with the following keys: - **translation_text** (`str`, present when `return_text=True`) -- The translation. - **translation_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token ids of the translation. """ return super().__call__(*args, **kwargs)
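# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# It shows the two ways `TranslationPipeline` receives language information: encoded in the
# task name (`translation_en_to_fr`), which `_sanitize_parameters` splits into `src_lang` /
# `tgt_lang`, or passed explicitly, as most multilingual checkpoints require. The checkpoint
# names below are examples only, not endorsed by the original module.
if __name__ == "__main__":
    from transformers import pipeline

    # Language pair taken from the task identifier.
    en_fr_translator = pipeline("translation_en_to_fr")
    print(en_fr_translator("How old are you?"))

    # Explicit source/target languages for a multilingual checkpoint.
    translator = pipeline("translation", model="facebook/mbart-large-50-many-to-many-mmt")
    print(translator("How old are you?", src_lang="en_XX", tgt_lang="fr_XX"))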
16,875
44.858696
141
py
transformers
transformers-main/src/transformers/pipelines/conversational.py
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    """
    Utility class containing a conversation and its history. This class is meant to be used as an input to the
    [`ConversationalPipeline`]. The conversation contains several utility functions to manage the addition of new user
    inputs and generated model responses. A conversation needs to contain an unprocessed user input before being
    passed to the [`ConversationalPipeline`]. This user input is either created when the class is instantiated, or by
    calling `conversation.add_user_input("input")` after a conversation turn.

    Arguments:
        text (`str`, *optional*):
            The initial user input to start the conversation. If not provided, a user input needs to be provided
            manually using the [`~Conversation.add_user_input`] method before the conversation can begin.
        conversation_id (`uuid.UUID`, *optional*):
            Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the
            conversation.
        past_user_inputs (`List[str]`, *optional*):
            Eventual past history of the conversation of the user. You don't need to pass it manually if you use the
            pipeline interactively but if you want to recreate history you need to set both `past_user_inputs` and
            `generated_responses` with equal length lists of strings
        generated_responses (`List[str]`, *optional*):
            Eventual past history of the conversation of the model. You don't need to pass it manually if you use the
            pipeline interactively but if you want to recreate history you need to set both `past_user_inputs` and
            `generated_responses` with equal length lists of strings

    Usage:

    ```python
    conversation = Conversation("Going to the movies tonight - any suggestions?")

    # Steps usually performed by the model when generating a response:
    # 1. Mark the user input as processed (moved to the history)
    conversation.mark_processed()
    # 2. Append a model response
    conversation.append_response("The Big Lebowski.")

    conversation.add_user_input("Is it good?")
    ```"""

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """
        Add a user input to the conversation for the next round. This populates the internal `new_user_input` field.

        Args:
            text (`str`): The user input for the next conversation round.
            overwrite (`bool`, *optional*, defaults to `False`):
                Whether or not existing and unprocessed user input should be overwritten when this function is called.
""" if self.new_user_input: if overwrite: logger.warning( f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ' f'with: "{text}".' ) self.new_user_input = text else: logger.warning( f'User input added while unprocessed input was existing: "{self.new_user_input}" new input ' f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' ) else: self.new_user_input = text def mark_processed(self): """ Mark the conversation as processed (moves the content of `new_user_input` to `past_user_inputs`) and empties the `new_user_input` field. """ if self.new_user_input: self.past_user_inputs.append(self.new_user_input) self.new_user_input = None def append_response(self, response: str): """ Append a response to the list of generated responses. Args: response (`str`): The model generated response. """ self.generated_responses.append(response) def iter_texts(self): """ Iterates over all blobs of the conversation. Returns: Iterator of (is_user, text_chunk) in chronological order of the conversation. `is_user` is a `bool`, `text_chunks` is a `str`. """ for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__(self): """ Generates a string representation of the conversation. Return: `str`: Example: Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114 user >> Going to the movies tonight - any suggestions? bot >> The Big Lebowski """ output = f"Conversation id: {self.uuid} \n" for is_user, text in self.iter_texts(): name = "user" if is_user else "bot" output += f"{name} >> {text} \n" return output @add_end_docstrings( PIPELINE_INIT_ARGS, r""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. """, ) class ConversationalPipeline(Pipeline): """ Multi-turn conversational pipeline. Example: ```python >>> from transformers import pipeline, Conversation >>> chatbot = pipeline(model="microsoft/DialoGPT-medium") >>> conversation = Conversation("Going to the movies tonight - any suggestions?") >>> conversation = chatbot(conversation) >>> conversation.generated_responses[-1] 'The Big Lebowski' >>> conversation.add_user_input("Is it an action movie?") >>> conversation = chatbot(conversation) >>> conversation.generated_responses[-1] "It's a comedy." ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This conversational pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"conversational"`. The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task, currently: *'microsoft/DialoGPT-small'*, *'microsoft/DialoGPT-medium'*, *'microsoft/DialoGPT-large'*. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=conversational). 
""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.tokenizer.pad_token_id is None: self.tokenizer.pad_token = self.tokenizer.eos_token def _sanitize_parameters( self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs ): preprocess_params = {} forward_params = {} postprocess_params = {} if min_length_for_response is not None: preprocess_params["min_length_for_response"] = min_length_for_response if minimum_tokens is not None: forward_params["minimum_tokens"] = minimum_tokens if "max_length" in generate_kwargs: forward_params["max_length"] = generate_kwargs["max_length"] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(generate_kwargs) return preprocess_params, forward_params, postprocess_params def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs): r""" Generate responses for the conversation(s) given as inputs. Args: conversations (a [`Conversation`] or a list of [`Conversation`]): Conversations to generate responses for. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the potential extra spaces in the text output. generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework [here](./model#generative-models)). Returns: [`Conversation`] or a list of [`Conversation`]: Conversation(s) with updated generated responses for those containing a new user input. """ # XXX: num_workers==0 is required to be backward compatible # Otherwise the threads will require a Conversation copy. # This will definitely hinder performance on GPU, but has to be opted # in because of this BC change. outputs = super().__call__(conversations, num_workers=num_workers, **kwargs) if isinstance(outputs, list) and len(outputs) == 1: return outputs[0] return outputs def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]: if not isinstance(conversation, Conversation): raise ValueError("ConversationalPipeline, expects Conversation as inputs") if conversation.new_user_input is None: raise ValueError( f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. 
" "Add user inputs with the conversation's `add_user_input` method" ) if hasattr(self.tokenizer, "_build_conversation_input_ids"): input_ids = self.tokenizer._build_conversation_input_ids(conversation) else: # If the tokenizer cannot handle conversations, we default to only the old version input_ids = self._legacy_parse_and_tokenize(conversation) if self.framework == "pt": input_ids = torch.LongTensor([input_ids]) elif self.framework == "tf": input_ids = tf.constant([input_ids]) return {"input_ids": input_ids, "conversation": conversation} def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs): max_length = generate_kwargs.get("max_length", self.model.config.max_length) n = model_inputs["input_ids"].shape[1] if max_length - minimum_tokens < n: logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})") trim = max_length - minimum_tokens model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:] if "attention_mask" in model_inputs: model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:] conversation = model_inputs.pop("conversation") generate_kwargs["max_length"] = max_length output_ids = self.model.generate(**model_inputs, **generate_kwargs) if self.model.config.is_encoder_decoder: start_position = 1 else: start_position = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def postprocess(self, model_outputs, clean_up_tokenization_spaces=True): output_ids = model_outputs["output_ids"] answer = self.tokenizer.decode( output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) conversation = model_outputs["conversation"] conversation.mark_processed() conversation.append_response(answer) return conversation def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict: eos_token_id = self.tokenizer.eos_token_id input_ids = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id]) else: input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False)) if len(input_ids) > self.tokenizer.model_max_length: input_ids = input_ids[-self.tokenizer.model_max_length :] return input_ids
13,594
42.713826
119
py
transformers
transformers-main/src/transformers/pipelines/table_question_answering.py
import collections
import types

import numpy as np

from ..utils import (
    add_end_docstrings,
    is_tensorflow_probability_available,
    is_tf_available,
    is_torch_available,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, Dataset, Pipeline, PipelineException


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import (
        MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
    )

if is_tf_available() and is_tensorflow_probability_available():
    import tensorflow as tf
    import tensorflow_probability as tfp

    from ..models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
    )


class TableQuestionAnsweringArgumentHandler(ArgumentHandler):
    """
    Handles arguments for the TableQuestionAnsweringPipeline
    """

    def __call__(self, table=None, query=None, **kwargs):
        # Returns tqa_pipeline_inputs of shape:
        # [
        #   {"table": pd.DataFrame, "query": List[str]},
        #   ...,
        #   {"table": pd.DataFrame, "query" : List[str]}
        # ]
        requires_backends(self, "pandas")
        import pandas as pd

        if table is None:
            raise ValueError("Keyword argument `table` cannot be None.")
        elif query is None:
            if isinstance(table, dict) and table.get("query") is not None and table.get("table") is not None:
                tqa_pipeline_inputs = [table]
            elif isinstance(table, list) and len(table) > 0:
                if not all(isinstance(d, dict) for d in table):
                    raise ValueError(
                        f"Keyword argument `table` should be a list of dict, but is {[type(d) for d in table]}"
                    )

                if table[0].get("query") is not None and table[0].get("table") is not None:
                    tqa_pipeline_inputs = table
                else:
                    raise ValueError(
                        "If keyword argument `table` is a list of dictionaries, each dictionary should have a `table`"
                        f" and a `query` key, but the first dictionary has keys {table[0].keys()} instead."
                    )
            elif Dataset is not None and isinstance(table, Dataset) or isinstance(table, types.GeneratorType):
                return table
            else:
                raise ValueError(
                    "Invalid input. Keyword argument `table` should be either of type `dict` or `list`, but "
                    f"is {type(table)}"
                )
        else:
            tqa_pipeline_inputs = [{"table": table, "query": query}]

        for tqa_pipeline_input in tqa_pipeline_inputs:
            if not isinstance(tqa_pipeline_input["table"], pd.DataFrame):
                if tqa_pipeline_input["table"] is None:
                    raise ValueError("Table cannot be None.")

                tqa_pipeline_input["table"] = pd.DataFrame(tqa_pipeline_input["table"])

        return tqa_pipeline_inputs


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TableQuestionAnsweringPipeline(Pipeline):
    """
    Table Question Answering pipeline using a `ModelForTableQuestionAnswering`. This pipeline is only available in
    PyTorch.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> oracle = pipeline(model="google/tapas-base-finetuned-wtq")
    >>> table = {
    ...     "Repository": ["Transformers", "Datasets", "Tokenizers"],
    ...     "Stars": ["36542", "4512", "3934"],
    ...     "Contributors": ["651", "77", "34"],
    ...     "Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
    ... }
    >>> oracle(query="How many stars does the transformers repository have?", table=table)
    {'answer': 'AVERAGE > 36542', 'coordinates': [(0, 1)], 'cells': ['36542'], 'aggregator': 'AVERAGE'}
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This tabular question answering pipeline can currently be loaded from [`pipeline`] using the following task
    identifier: `"table-question-answering"`.
The models that this pipeline can use are models that have been fine-tuned on a tabular question answering task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=table-question-answering). """ default_input_names = "table,query" def __init__(self, args_parser=TableQuestionAnsweringArgumentHandler(), *args, **kwargs): super().__init__(*args, **kwargs) self._args_parser = args_parser self.check_model_type( dict( TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING.items() + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items() ) if self.framework == "tf" else dict( MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING.items() + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items() ) ) self.aggregate = bool(getattr(self.model.config, "aggregation_labels", None)) and bool( getattr(self.model.config, "num_aggregation_labels", None) ) self.type = "tapas" if hasattr(self.model.config, "aggregation_labels") else None def batch_inference(self, **inputs): return self.model(**inputs) def sequential_inference(self, **inputs): """ Inference used for models that need to process sequences in a sequential fashion, like the SQA models which handle conversational query related to a table. """ if self.framework == "pt": all_logits = [] all_aggregations = [] prev_answers = None batch_size = inputs["input_ids"].shape[0] input_ids = inputs["input_ids"].to(self.device) attention_mask = inputs["attention_mask"].to(self.device) token_type_ids = inputs["token_type_ids"].to(self.device) token_type_ids_example = None for index in range(batch_size): # If sequences have already been processed, the token type IDs will be created according to the previous # answer. if prev_answers is not None: prev_labels_example = token_type_ids_example[:, 3] # shape (seq_len,) model_labels = np.zeros_like(prev_labels_example.cpu().numpy()) # shape (seq_len,) token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) for i in range(model_labels.shape[0]): segment_id = token_type_ids_example[:, 0].tolist()[i] col_id = token_type_ids_example[:, 1].tolist()[i] - 1 row_id = token_type_ids_example[:, 2].tolist()[i] - 1 if row_id >= 0 and col_id >= 0 and segment_id == 1: model_labels[i] = int(prev_answers[(col_id, row_id)]) token_type_ids_example[:, 3] = torch.from_numpy(model_labels).type(torch.long).to(self.device) input_ids_example = input_ids[index] attention_mask_example = attention_mask[index] # shape (seq_len,) token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) outputs = self.model( input_ids=input_ids_example.unsqueeze(0), attention_mask=attention_mask_example.unsqueeze(0), token_type_ids=token_type_ids_example.unsqueeze(0), ) logits = outputs.logits if self.aggregate: all_aggregations.append(outputs.logits_aggregation) all_logits.append(logits) dist_per_token = torch.distributions.Bernoulli(logits=logits) probabilities = dist_per_token.probs * attention_mask_example.type(torch.float32).to( dist_per_token.probs.device ) coords_to_probs = collections.defaultdict(list) for i, p in enumerate(probabilities.squeeze().tolist()): segment_id = token_type_ids_example[:, 0].tolist()[i] col = token_type_ids_example[:, 1].tolist()[i] - 1 row = token_type_ids_example[:, 2].tolist()[i] - 1 if col >= 0 and row >= 0 and segment_id == 1: coords_to_probs[(col, row)].append(p) prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs} logits_batch = torch.cat(tuple(all_logits), 0) return (logits_batch,) if not self.aggregate else (logits_batch, 
torch.cat(tuple(all_aggregations), 0)) else: all_logits = [] all_aggregations = [] prev_answers = None batch_size = inputs["input_ids"].shape[0] input_ids = inputs["input_ids"] attention_mask = inputs["attention_mask"] token_type_ids = inputs["token_type_ids"].numpy() token_type_ids_example = None for index in range(batch_size): # If sequences have already been processed, the token type IDs will be created according to the previous # answer. if prev_answers is not None: prev_labels_example = token_type_ids_example[:, 3] # shape (seq_len,) model_labels = np.zeros_like(prev_labels_example, dtype=np.int32) # shape (seq_len,) token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) for i in range(model_labels.shape[0]): segment_id = token_type_ids_example[:, 0].tolist()[i] col_id = token_type_ids_example[:, 1].tolist()[i] - 1 row_id = token_type_ids_example[:, 2].tolist()[i] - 1 if row_id >= 0 and col_id >= 0 and segment_id == 1: model_labels[i] = int(prev_answers[(col_id, row_id)]) token_type_ids_example[:, 3] = model_labels input_ids_example = input_ids[index] attention_mask_example = attention_mask[index] # shape (seq_len,) token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) outputs = self.model( input_ids=np.expand_dims(input_ids_example, axis=0), attention_mask=np.expand_dims(attention_mask_example, axis=0), token_type_ids=np.expand_dims(token_type_ids_example, axis=0), ) logits = outputs.logits if self.aggregate: all_aggregations.append(outputs.logits_aggregation) all_logits.append(logits) dist_per_token = tfp.distributions.Bernoulli(logits=logits) probabilities = dist_per_token.probs_parameter() * tf.cast(attention_mask_example, tf.float32) coords_to_probs = collections.defaultdict(list) token_type_ids_example = token_type_ids_example for i, p in enumerate(tf.squeeze(probabilities).numpy().tolist()): segment_id = token_type_ids_example[:, 0].tolist()[i] col = token_type_ids_example[:, 1].tolist()[i] - 1 row = token_type_ids_example[:, 2].tolist()[i] - 1 if col >= 0 and row >= 0 and segment_id == 1: coords_to_probs[(col, row)].append(p) prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs} logits_batch = tf.concat(tuple(all_logits), 0) return (logits_batch,) if not self.aggregate else (logits_batch, tf.concat(tuple(all_aggregations), 0)) def __call__(self, *args, **kwargs): r""" Answers queries according to a table. The pipeline accepts several types of inputs which are detailed below: - `pipeline(table, query)` - `pipeline(table, [query])` - `pipeline(table=table, query=query)` - `pipeline(table=table, query=[query])` - `pipeline({"table": table, "query": query})` - `pipeline({"table": table, "query": [query]})` - `pipeline([{"table": table, "query": query}, {"table": table, "query": query}])` The `table` argument should be a dict or a DataFrame built from that dict, containing the whole table: Example: ```python data = { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } ``` This dictionary can be passed in as such, or can be converted to a pandas DataFrame: Example: ```python import pandas as pd table = pd.DataFrame.from_dict(data) ``` Args: table (`pd.DataFrame` or `Dict`): Pandas DataFrame or dictionary that will be converted to a DataFrame containing all the table values. See above for an example of dictionary. 
            query (`str` or `List[str]`):
                Query or list of queries that will be sent to the model alongside the table.
            sequential (`bool`, *optional*, defaults to `False`):
                Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the
                inference to be done sequentially to extract relations within sequences, given their conversational
                nature.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Activates and controls padding. Accepts the following values:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).

            truncation (`bool`, `str` or [`TapasTruncationStrategy`], *optional*, defaults to `False`):
                Activates and controls truncation. Accepts the following values:

                - `True` or `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length`
                  or to the maximum acceptable input length for the model if that argument is not provided. This will
                  truncate row by row, removing rows from the table.
                - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
                  greater than the model maximum admissible input size).

        Return:
            A dictionary or a list of dictionaries containing results: Each result is a dictionary with the following
            keys:

            - **answer** (`str`) -- The answer of the query given the table. If there is an aggregator, the answer will
              be preceded by `AGGREGATOR >`.
            - **coordinates** (`List[Tuple[int, int]]`) -- Coordinates of the cells of the answers.
            - **cells** (`List[str]`) -- List of strings made up of the answer cell values.
            - **aggregator** (`str`) -- If the model has an aggregator, this returns the aggregator.
""" pipeline_inputs = self._args_parser(*args, **kwargs) results = super().__call__(pipeline_inputs, **kwargs) if len(results) == 1: return results[0] return results def _sanitize_parameters(self, sequential=None, padding=None, truncation=None, **kwargs): preprocess_params = {} if padding is not None: preprocess_params["padding"] = padding if truncation is not None: preprocess_params["truncation"] = truncation forward_params = {} if sequential is not None: forward_params["sequential"] = sequential return preprocess_params, forward_params, {} def preprocess(self, pipeline_input, sequential=None, padding=True, truncation=None): if truncation is None: if self.type == "tapas": truncation = "drop_rows_to_fit" else: truncation = "do_not_truncate" table, query = pipeline_input["table"], pipeline_input["query"] if table.empty: raise ValueError("table is empty") if query is None or query == "": raise ValueError("query is empty") inputs = self.tokenizer(table, query, return_tensors=self.framework, truncation=truncation, padding=padding) inputs["table"] = table return inputs def _forward(self, model_inputs, sequential=False): table = model_inputs.pop("table") if self.type == "tapas": if sequential: outputs = self.sequential_inference(**model_inputs) else: outputs = self.batch_inference(**model_inputs) else: outputs = self.model.generate(**model_inputs) model_outputs = {"model_inputs": model_inputs, "table": table, "outputs": outputs} return model_outputs def postprocess(self, model_outputs): inputs = model_outputs["model_inputs"] table = model_outputs["table"] outputs = model_outputs["outputs"] if self.type == "tapas": if self.aggregate: logits, logits_agg = outputs[:2] predictions = self.tokenizer.convert_logits_to_predictions(inputs, logits, logits_agg) answer_coordinates_batch, agg_predictions = predictions aggregators = {i: self.model.config.aggregation_labels[pred] for i, pred in enumerate(agg_predictions)} no_agg_label_index = self.model.config.no_aggregation_label_index aggregators_prefix = { i: aggregators[i] + " > " for i, pred in enumerate(agg_predictions) if pred != no_agg_label_index } else: logits = outputs[0] predictions = self.tokenizer.convert_logits_to_predictions(inputs, logits) answer_coordinates_batch = predictions[0] aggregators = {} aggregators_prefix = {} answers = [] for index, coordinates in enumerate(answer_coordinates_batch): cells = [table.iat[coordinate] for coordinate in coordinates] aggregator = aggregators.get(index, "") aggregator_prefix = aggregators_prefix.get(index, "") answer = { "answer": aggregator_prefix + ", ".join(cells), "coordinates": coordinates, "cells": [table.iat[coordinate] for coordinate in coordinates], } if aggregator: answer["aggregator"] = aggregator answers.append(answer) if len(answer) == 0: raise PipelineException("Empty answer") else: answers = [{"answer": answer} for answer in self.tokenizer.batch_decode(outputs, skip_special_tokens=True)] return answers if len(answers) > 1 else answers[0]
19,892
44.521739
120
py
transformers
transformers-main/src/transformers/pipelines/zero_shot_classification.py
from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline logger = logging.get_logger(__name__) class ZeroShotClassificationArgumentHandler(ArgumentHandler): """ Handles arguments for zero-shot for text classification by turning each possible label into an NLI premise/hypothesis pair. """ def _parse_labels(self, labels): if isinstance(labels, str): labels = [label.strip() for label in labels.split(",") if label.strip()] return labels def __call__(self, sequences, labels, hypothesis_template): if len(labels) == 0 or len(sequences) == 0: raise ValueError("You must include at least one label and at least one sequence.") if hypothesis_template.format(labels[0]) == hypothesis_template: raise ValueError( ( 'The provided hypothesis_template "{}" was not able to be formatted with the target labels. ' "Make sure the passed template includes formatting syntax such as {{}} where the label should go." ).format(hypothesis_template) ) if isinstance(sequences, str): sequences = [sequences] sequence_pairs = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels]) return sequence_pairs, sequences @add_end_docstrings(PIPELINE_INIT_ARGS) class ZeroShotClassificationPipeline(ChunkPipeline): """ NLI-based zero-shot classification pipeline using a `ModelForSequenceClassification` trained on NLI (natural language inference) tasks. Equivalent of `text-classification` pipelines, but these models don't require a hardcoded number of potential classes, they can be chosen at runtime. It usually means it's slower but it is **much** more flexible. Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis pair and passed to the pretrained model. Then, the logit for *entailment* is taken as the logit for the candidate label being valid. Any NLI model can be used, but the id of the *entailment* label must be included in the model config's :attr:*~transformers.PretrainedConfig.label2id*. Example: ```python >>> from transformers import pipeline >>> oracle = pipeline(model="facebook/bart-large-mnli") >>> oracle( ... "I have a problem with my iphone that needs to be resolved asap!!", ... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"], ... ) {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]} >>> oracle( ... "I have a problem with my iphone that needs to be resolved asap!!", ... candidate_labels=["english", "german"], ... ) {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['english', 'german'], 'scores': [0.814, 0.186]} ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This NLI pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"zero-shot-classification"`. The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?search=nli). 
""" def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs): self._args_parser = args_parser super().__init__(*args, **kwargs) if self.entailment_id == -1: logger.warning( "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to " "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." ) @property def entailment_id(self): for label, ind in self.model.config.label2id.items(): if label.lower().startswith("entail"): return ind return -1 def _parse_and_tokenize( self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs ): """ Parse arguments and tokenize only_first so that hypothesis (label) is not truncated """ return_tensors = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( "Tokenizer was not supporting padding necessary for zero-shot, attempting to use " " `pad_token=eos_token`" ) self.tokenizer.pad_token = self.tokenizer.eos_token try: inputs = self.tokenizer( sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, ) except Exception as e: if "too short" in str(e): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. inputs = self.tokenizer( sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, ) else: raise e return inputs def _sanitize_parameters(self, **kwargs): if kwargs.get("multi_class", None) is not None: kwargs["multi_label"] = kwargs["multi_class"] logger.warning( "The `multi_class` argument has been deprecated and renamed to `multi_label`. " "`multi_class` will be removed in a future version of Transformers." ) preprocess_params = {} if "candidate_labels" in kwargs: preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"]) if "hypothesis_template" in kwargs: preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"] postprocess_params = {} if "multi_label" in kwargs: postprocess_params["multi_label"] = kwargs["multi_label"] return preprocess_params, {}, postprocess_params def __call__( self, sequences: Union[str, List[str]], *args, **kwargs, ): """ Classify the sequence(s) given as inputs. See the [`ZeroShotClassificationPipeline`] documentation for more information. Args: sequences (`str` or `List[str]`): The sequence(s) to classify, will be truncated if the model input is too large. candidate_labels (`str` or `List[str]`): The set of possible class labels to classify each sequence into. Can be a single label, a string of comma-separated labels, or a list of labels. hypothesis_template (`str`, *optional*, defaults to `"This example is {}."`): The template used to turn each label into an NLI-style hypothesis. This template must include a {} or similar syntax for the candidate label to be inserted into the template. For example, the default template is `"This example is {}."` With the candidate label `"sports"`, this would be fed into the model like `"<cls> sequence to classify <sep> This example is sports . <sep>"`. The default template works well in many cases, but it may be worthwhile to experiment with different templates depending on the task setting. 
multi_label (`bool`, *optional*, defaults to `False`): Whether or not multiple candidate labels can be true. If `False`, the scores are normalized such that the sum of the label likelihoods for each sequence is 1. If `True`, the labels are considered independent and probabilities are normalized for each candidate by doing a softmax of the entailment score vs. the contradiction score. Return: A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: - **sequence** (`str`) -- The sequence for which this is the output. - **labels** (`List[str]`) -- The labels sorted by order of likelihood. - **scores** (`List[float]`) -- The probabilities for each of the labels. """ if len(args) == 0: pass elif len(args) == 1 and "candidate_labels" not in kwargs: kwargs["candidate_labels"] = args[0] else: raise ValueError(f"Unable to understand extra arguments {args}") return super().__call__(sequences, **kwargs) def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."): sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template) for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)): model_input = self._parse_and_tokenize([sequence_pair]) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(candidate_labels) - 1, **model_input, } def _forward(self, inputs): candidate_label = inputs["candidate_label"] sequence = inputs["sequence"] model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names} outputs = self.model(**model_inputs) model_outputs = { "candidate_label": candidate_label, "sequence": sequence, "is_last": inputs["is_last"], **outputs, } return model_outputs def postprocess(self, model_outputs, multi_label=False): candidate_labels = [outputs["candidate_label"] for outputs in model_outputs] sequences = [outputs["sequence"] for outputs in model_outputs] logits = np.concatenate([output["logits"].numpy() for output in model_outputs]) N = logits.shape[0] n = len(candidate_labels) num_sequences = N // n reshaped_outputs = logits.reshape((num_sequences, n, -1)) if multi_label or len(candidate_labels) == 1: # softmax over the entailment vs. contradiction dim for each label independently entailment_id = self.entailment_id contradiction_id = -1 if entailment_id == 0 else 0 entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]] scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True) scores = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels entail_logits = reshaped_outputs[..., self.entailment_id] scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True) top_inds = list(reversed(scores[0].argsort())) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
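# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# It combines a custom `hypothesis_template` with `multi_label=True`, so every candidate label
# is scored independently from its entailment vs. contradiction logits (see `postprocess`).
# The checkpoint name is an example only.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier(
        "I have a problem with my iphone that needs to be resolved asap!!",
        candidate_labels=["urgent", "phone", "billing"],
        hypothesis_template="This customer message is about {}.",
        multi_label=True,
    )
    print(list(zip(result["labels"], result["scores"])))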
11,983
44.915709
198
py
transformers
transformers-main/src/transformers/pipelines/__init__.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io import json import os import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from huggingface_hub import model_info from numpy import isin from ..configuration_utils import PretrainedConfig from ..dynamic_module_utils import get_class_from_dynamic_module from ..feature_extraction_utils import PreTrainedFeatureExtractor from ..image_processing_utils import BaseImageProcessor from ..models.auto.configuration_auto import AutoConfig from ..models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor from ..models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor from ..models.auto.modeling_auto import AutoModelForDepthEstimation from ..models.auto.tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer from ..tokenization_utils import PreTrainedTokenizer from ..utils import ( HUGGINGFACE_CO_RESOLVE_ENDPOINT, is_kenlm_available, is_offline_mode, is_pyctcdecode_available, is_tf_available, is_torch_available, logging, ) from .audio_classification import AudioClassificationPipeline from .automatic_speech_recognition import AutomaticSpeechRecognitionPipeline from .base import ( ArgumentHandler, CsvPipelineDataFormat, JsonPipelineDataFormat, PipedPipelineDataFormat, Pipeline, PipelineDataFormat, PipelineException, PipelineRegistry, get_default_model_and_revision, infer_framework_load_model, ) from .conversational import Conversation, ConversationalPipeline from .depth_estimation import DepthEstimationPipeline from .document_question_answering import DocumentQuestionAnsweringPipeline from .feature_extraction import FeatureExtractionPipeline from .fill_mask import FillMaskPipeline from .image_classification import ImageClassificationPipeline from .image_segmentation import ImageSegmentationPipeline from .image_to_text import ImageToTextPipeline from .mask_generation import MaskGenerationPipeline from .object_detection import ObjectDetectionPipeline from .question_answering import QuestionAnsweringArgumentHandler, QuestionAnsweringPipeline from .table_question_answering import TableQuestionAnsweringArgumentHandler, TableQuestionAnsweringPipeline from .text2text_generation import SummarizationPipeline, Text2TextGenerationPipeline, TranslationPipeline from .text_classification import TextClassificationPipeline from .text_generation import TextGenerationPipeline from .token_classification import ( AggregationStrategy, NerPipeline, TokenClassificationArgumentHandler, TokenClassificationPipeline, ) from .video_classification import VideoClassificationPipeline from .visual_question_answering import VisualQuestionAnsweringPipeline from .zero_shot_audio_classification import ZeroShotAudioClassificationPipeline from .zero_shot_classification import ZeroShotClassificationArgumentHandler, ZeroShotClassificationPipeline from .zero_shot_image_classification import 
ZeroShotImageClassificationPipeline from .zero_shot_object_detection import ZeroShotObjectDetectionPipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import ( TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForImageClassification, TFAutoModelForMaskedLM, TFAutoModelForQuestionAnswering, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelForVision2Seq, TFAutoModelForZeroShotImageClassification, ) if is_torch_available(): import torch from ..models.auto.modeling_auto import ( MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, AutoModel, AutoModelForAudioClassification, AutoModelForCausalLM, AutoModelForCTC, AutoModelForDocumentQuestionAnswering, AutoModelForImageClassification, AutoModelForImageSegmentation, AutoModelForMaskedLM, AutoModelForMaskGeneration, AutoModelForObjectDetection, AutoModelForQuestionAnswering, AutoModelForSemanticSegmentation, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForSpeechSeq2Seq, AutoModelForTableQuestionAnswering, AutoModelForTokenClassification, AutoModelForVideoClassification, AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, AutoModelForZeroShotImageClassification, AutoModelForZeroShotObjectDetection, ) if TYPE_CHECKING: from ..modeling_tf_utils import TFPreTrainedModel from ..modeling_utils import PreTrainedModel from ..tokenization_utils_fast import PreTrainedTokenizerFast logger = logging.get_logger(__name__) # Register all the supported tasks here TASK_ALIASES = { "sentiment-analysis": "text-classification", "ner": "token-classification", "vqa": "visual-question-answering", } SUPPORTED_TASKS = { "audio-classification": { "impl": AudioClassificationPipeline, "tf": (), "pt": (AutoModelForAudioClassification,) if is_torch_available() else (), "default": {"model": {"pt": ("superb/wav2vec2-base-superb-ks", "372e048")}}, "type": "audio", }, "automatic-speech-recognition": { "impl": AutomaticSpeechRecognitionPipeline, "tf": (), "pt": (AutoModelForCTC, AutoModelForSpeechSeq2Seq) if is_torch_available() else (), "default": {"model": {"pt": ("facebook/wav2vec2-base-960h", "55bb623")}}, "type": "multimodal", }, "feature-extraction": { "impl": FeatureExtractionPipeline, "tf": (TFAutoModel,) if is_tf_available() else (), "pt": (AutoModel,) if is_torch_available() else (), "default": {"model": {"pt": ("distilbert-base-cased", "935ac13"), "tf": ("distilbert-base-cased", "935ac13")}}, "type": "multimodal", }, "text-classification": { "impl": TextClassificationPipeline, "tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (), "pt": (AutoModelForSequenceClassification,) if is_torch_available() else (), "default": { "model": { "pt": ("distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"), "tf": ("distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"), }, }, "type": "text", }, "token-classification": { "impl": TokenClassificationPipeline, "tf": (TFAutoModelForTokenClassification,) if is_tf_available() else (), "pt": (AutoModelForTokenClassification,) 
if is_torch_available() else (), "default": { "model": { "pt": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"), "tf": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"), }, }, "type": "text", }, "question-answering": { "impl": QuestionAnsweringPipeline, "tf": (TFAutoModelForQuestionAnswering,) if is_tf_available() else (), "pt": (AutoModelForQuestionAnswering,) if is_torch_available() else (), "default": { "model": { "pt": ("distilbert-base-cased-distilled-squad", "626af31"), "tf": ("distilbert-base-cased-distilled-squad", "626af31"), }, }, "type": "text", }, "table-question-answering": { "impl": TableQuestionAnsweringPipeline, "pt": (AutoModelForTableQuestionAnswering,) if is_torch_available() else (), "tf": (TFAutoModelForTableQuestionAnswering,) if is_tf_available() else (), "default": { "model": { "pt": ("google/tapas-base-finetuned-wtq", "69ceee2"), "tf": ("google/tapas-base-finetuned-wtq", "69ceee2"), }, }, "type": "text", }, "visual-question-answering": { "impl": VisualQuestionAnsweringPipeline, "pt": (AutoModelForVisualQuestionAnswering,) if is_torch_available() else (), "tf": (), "default": { "model": {"pt": ("dandelin/vilt-b32-finetuned-vqa", "4355f59")}, }, "type": "multimodal", }, "document-question-answering": { "impl": DocumentQuestionAnsweringPipeline, "pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (), "tf": (), "default": { "model": {"pt": ("impira/layoutlm-document-qa", "52e01b3")}, }, "type": "multimodal", }, "fill-mask": { "impl": FillMaskPipeline, "tf": (TFAutoModelForMaskedLM,) if is_tf_available() else (), "pt": (AutoModelForMaskedLM,) if is_torch_available() else (), "default": {"model": {"pt": ("distilroberta-base", "ec58a5b"), "tf": ("distilroberta-base", "ec58a5b")}}, "type": "text", }, "summarization": { "impl": SummarizationPipeline, "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (), "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (), "default": {"model": {"pt": ("sshleifer/distilbart-cnn-12-6", "a4f8f3e"), "tf": ("t5-small", "d769bba")}}, "type": "text", }, # This task is a special case as it's parametrized by SRC, TGT languages. 
"translation": { "impl": TranslationPipeline, "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (), "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (), "default": { ("en", "fr"): {"model": {"pt": ("t5-base", "686f1db"), "tf": ("t5-base", "686f1db")}}, ("en", "de"): {"model": {"pt": ("t5-base", "686f1db"), "tf": ("t5-base", "686f1db")}}, ("en", "ro"): {"model": {"pt": ("t5-base", "686f1db"), "tf": ("t5-base", "686f1db")}}, }, "type": "text", }, "text2text-generation": { "impl": Text2TextGenerationPipeline, "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (), "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (), "default": {"model": {"pt": ("t5-base", "686f1db"), "tf": ("t5-base", "686f1db")}}, "type": "text", }, "text-generation": { "impl": TextGenerationPipeline, "tf": (TFAutoModelForCausalLM,) if is_tf_available() else (), "pt": (AutoModelForCausalLM,) if is_torch_available() else (), "default": {"model": {"pt": ("gpt2", "6c0e608"), "tf": ("gpt2", "6c0e608")}}, "type": "text", }, "zero-shot-classification": { "impl": ZeroShotClassificationPipeline, "tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (), "pt": (AutoModelForSequenceClassification,) if is_torch_available() else (), "default": { "model": {"pt": ("facebook/bart-large-mnli", "c626438"), "tf": ("roberta-large-mnli", "130fb28")}, "config": {"pt": ("facebook/bart-large-mnli", "c626438"), "tf": ("roberta-large-mnli", "130fb28")}, }, "type": "text", }, "zero-shot-image-classification": { "impl": ZeroShotImageClassificationPipeline, "tf": (TFAutoModelForZeroShotImageClassification,) if is_tf_available() else (), "pt": (AutoModelForZeroShotImageClassification,) if is_torch_available() else (), "default": { "model": { "pt": ("openai/clip-vit-base-patch32", "f4881ba"), "tf": ("openai/clip-vit-base-patch32", "f4881ba"), } }, "type": "multimodal", }, "zero-shot-audio-classification": { "impl": ZeroShotAudioClassificationPipeline, "tf": (), "pt": (AutoModel,) if is_torch_available() else (), "default": { "model": { "pt": ("laion/clap-htsat-fused", "973b6e5"), } }, "type": "multimodal", }, "conversational": { "impl": ConversationalPipeline, "tf": (TFAutoModelForSeq2SeqLM, TFAutoModelForCausalLM) if is_tf_available() else (), "pt": (AutoModelForSeq2SeqLM, AutoModelForCausalLM) if is_torch_available() else (), "default": { "model": {"pt": ("microsoft/DialoGPT-medium", "8bada3b"), "tf": ("microsoft/DialoGPT-medium", "8bada3b")} }, "type": "text", }, "image-classification": { "impl": ImageClassificationPipeline, "tf": (TFAutoModelForImageClassification,) if is_tf_available() else (), "pt": (AutoModelForImageClassification,) if is_torch_available() else (), "default": { "model": { "pt": ("google/vit-base-patch16-224", "5dca96d"), "tf": ("google/vit-base-patch16-224", "5dca96d"), } }, "type": "image", }, "image-segmentation": { "impl": ImageSegmentationPipeline, "tf": (), "pt": (AutoModelForImageSegmentation, AutoModelForSemanticSegmentation) if is_torch_available() else (), "default": {"model": {"pt": ("facebook/detr-resnet-50-panoptic", "fc15262")}}, "type": "multimodal", }, "image-to-text": { "impl": ImageToTextPipeline, "tf": (TFAutoModelForVision2Seq,) if is_tf_available() else (), "pt": (AutoModelForVision2Seq,) if is_torch_available() else (), "default": { "model": { "pt": ("ydshieh/vit-gpt2-coco-en", "65636df"), "tf": ("ydshieh/vit-gpt2-coco-en", "65636df"), } }, "type": "multimodal", }, "object-detection": { "impl": ObjectDetectionPipeline, "tf": (), "pt": 
(AutoModelForObjectDetection,) if is_torch_available() else (), "default": {"model": {"pt": ("facebook/detr-resnet-50", "2729413")}}, "type": "multimodal", }, "zero-shot-object-detection": { "impl": ZeroShotObjectDetectionPipeline, "tf": (), "pt": (AutoModelForZeroShotObjectDetection,) if is_torch_available() else (), "default": {"model": {"pt": ("google/owlvit-base-patch32", "17740e1")}}, "type": "multimodal", }, "depth-estimation": { "impl": DepthEstimationPipeline, "tf": (), "pt": (AutoModelForDepthEstimation,) if is_torch_available() else (), "default": {"model": {"pt": ("Intel/dpt-large", "e93beec")}}, "type": "image", }, "video-classification": { "impl": VideoClassificationPipeline, "tf": (), "pt": (AutoModelForVideoClassification,) if is_torch_available() else (), "default": {"model": {"pt": ("MCG-NJU/videomae-base-finetuned-kinetics", "4800870")}}, "type": "video", }, "mask-generation": { "impl": MaskGenerationPipeline, "tf": (), "pt": (AutoModelForMaskGeneration,) if is_torch_available() else (), "default": {"model": {"pt": ("facebook/sam-vit-huge", "997b15")}}, "type": "multimodal", }, } NO_FEATURE_EXTRACTOR_TASKS = set() NO_IMAGE_PROCESSOR_TASKS = set() NO_TOKENIZER_TASKS = set() # Those model configs are special, they are generic over their task, meaning # any tokenizer/feature_extractor might be use for a given model so we cannot # use the statically defined TOKENIZER_MAPPING and FEATURE_EXTRACTOR_MAPPING to # see if the model defines such objects or not. MULTI_MODEL_CONFIGS = {"SpeechEncoderDecoderConfig", "VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig"} for task, values in SUPPORTED_TASKS.items(): if values["type"] == "text": NO_FEATURE_EXTRACTOR_TASKS.add(task) NO_IMAGE_PROCESSOR_TASKS.add(task) elif values["type"] in {"image", "video"}: NO_TOKENIZER_TASKS.add(task) elif values["type"] in {"audio"}: NO_TOKENIZER_TASKS.add(task) NO_IMAGE_PROCESSOR_TASKS.add(task) elif values["type"] != "multimodal": raise ValueError(f"SUPPORTED_TASK {task} contains invalid type {values['type']}") PIPELINE_REGISTRY = PipelineRegistry(supported_tasks=SUPPORTED_TASKS, task_aliases=TASK_ALIASES) def get_supported_tasks() -> List[str]: """ Returns a list of supported task strings. """ return PIPELINE_REGISTRY.get_supported_tasks() def get_task(model: str, use_auth_token: Optional[str] = None) -> str: if is_offline_mode(): raise RuntimeError("You cannot infer task automatically within `pipeline` when using offline mode") try: info = model_info(model, token=use_auth_token) except Exception as e: raise RuntimeError(f"Instantiating a pipeline without a task set raised an error: {e}") if not info.pipeline_tag: raise RuntimeError( f"The model {model} does not seem to have a correct `pipeline_tag` set to infer the task automatically" ) if getattr(info, "library_name", "transformers") != "transformers": raise RuntimeError(f"This model is meant to be used with {info.library_name} not with transformers") task = info.pipeline_tag return task def check_task(task: str) -> Tuple[str, Dict, Any]: """ Checks an incoming task string, to validate it's correct and return the default Pipeline and Model classes, and default models if they exist. Args: task (`str`): The task defining which pipeline will be returned. 
Currently accepted tasks are: - `"audio-classification"` - `"automatic-speech-recognition"` - `"conversational"` - `"depth-estimation"` - `"document-question-answering"` - `"feature-extraction"` - `"fill-mask"` - `"image-classification"` - `"image-segmentation"` - `"image-to-text"` - `"object-detection"` - `"question-answering"` - `"summarization"` - `"table-question-answering"` - `"text2text-generation"` - `"text-classification"` (alias `"sentiment-analysis"` available) - `"text-generation"` - `"token-classification"` (alias `"ner"` available) - `"translation"` - `"translation_xx_to_yy"` - `"video-classification"` - `"visual-question-answering"` - `"zero-shot-classification"` - `"zero-shot-image-classification"` - `"zero-shot-object-detection"` Returns: (normalized_task: `str`, task_defaults: `dict`, task_options: (`tuple`, None)) The normalized task name (removed alias and options). The actual dictionary required to initialize the pipeline and some extra task options for parametrized tasks like "translation_XX_to_YY" """ return PIPELINE_REGISTRY.check_task(task) def clean_custom_task(task_info): import transformers if "impl" not in task_info: raise RuntimeError("This model introduces a custom pipeline without specifying its implementation.") pt_class_names = task_info.get("pt", ()) if isinstance(pt_class_names, str): pt_class_names = [pt_class_names] task_info["pt"] = tuple(getattr(transformers, c) for c in pt_class_names) tf_class_names = task_info.get("tf", ()) if isinstance(tf_class_names, str): tf_class_names = [tf_class_names] task_info["tf"] = tuple(getattr(transformers, c) for c in tf_class_names) return task_info, None def pipeline( task: str = None, model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, use_auth_token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map=None, torch_dtype=None, trust_remote_code: Optional[bool] = None, model_kwargs: Dict[str, Any] = None, pipeline_class: Optional[Any] = None, **kwargs, ) -> Pipeline: """ Utility factory method to build a [`Pipeline`]. Pipelines are made of: - A [tokenizer](tokenizer) in charge of mapping raw textual input to token. - A [model](model) to make predictions from the inputs. - Some (optional) post processing for enhancing model's output. Args: task (`str`): The task defining which pipeline will be returned. Currently accepted tasks are: - `"audio-classification"`: will return a [`AudioClassificationPipeline`]. - `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`]. - `"conversational"`: will return a [`ConversationalPipeline`]. - `"depth-estimation"`: will return a [`DepthEstimationPipeline`]. - `"document-question-answering"`: will return a [`DocumentQuestionAnsweringPipeline`]. - `"feature-extraction"`: will return a [`FeatureExtractionPipeline`]. - `"fill-mask"`: will return a [`FillMaskPipeline`]:. - `"image-classification"`: will return a [`ImageClassificationPipeline`]. - `"image-segmentation"`: will return a [`ImageSegmentationPipeline`]. - `"image-to-text"`: will return a [`ImageToTextPipeline`]. 
- `"mask-generation"`: will return a [`MaskGenerationPipeline`]. - `"object-detection"`: will return a [`ObjectDetectionPipeline`]. - `"question-answering"`: will return a [`QuestionAnsweringPipeline`]. - `"summarization"`: will return a [`SummarizationPipeline`]. - `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`]. - `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`]. - `"text-classification"` (alias `"sentiment-analysis"` available): will return a [`TextClassificationPipeline`]. - `"text-generation"`: will return a [`TextGenerationPipeline`]:. - `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`]. - `"translation"`: will return a [`TranslationPipeline`]. - `"translation_xx_to_yy"`: will return a [`TranslationPipeline`]. - `"video-classification"`: will return a [`VideoClassificationPipeline`]. - `"visual-question-answering"`: will return a [`VisualQuestionAnsweringPipeline`]. - `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`]. - `"zero-shot-image-classification"`: will return a [`ZeroShotImageClassificationPipeline`]. - `"zero-shot-audio-classification"`: will return a [`ZeroShotAudioClassificationPipeline`]. - `"zero-shot-object-detection"`: will return a [`ZeroShotObjectDetectionPipeline`]. model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*): The model that will be used by the pipeline to make predictions. This can be a model identifier or an actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or [`TFPreTrainedModel`] (for TensorFlow). If not provided, the default for the `task` will be loaded. config (`str` or [`PretrainedConfig`], *optional*): The configuration that will be used by the pipeline to instantiate the model. This can be a model identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`]. If not provided, the default configuration file for the requested model will be used. That means that if `model` is given, its default configuration will be used. However, if `model` is not supplied, this `task`'s default model's config is used instead. tokenizer (`str` or [`PreTrainedTokenizer`], *optional*): The tokenizer that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`]. If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model` is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string). However, if `config` is also not given or not a string, then the default tokenizer for the given `task` will be loaded. feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*): The feature extractor that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`]. Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal models. Multi-modal models will also require a tokenizer to be passed. If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If `model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it is a string). 
However, if `config` is also not given or not a string, then the default feature extractor for the given `task` will be loaded. framework (`str`, *optional*): The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is provided. revision (`str`, *optional*, defaults to `"main"`): When passing a task name or a string model identifier: The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. use_fast (`bool`, *optional*, defaults to `True`): Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). device (`int` or `str` or `torch.device`): Defines the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which this pipeline will be allocated. device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set `device_map="auto"` to compute the most optimized `device_map` automatically (see [here](https://huggingface.co/docs/accelerate/main/en/package_reference/big_modeling#accelerate.cpu_offload) for more information). <Tip warning={true}> Do not use `device_map` AND `device` at the same time as they will conflict </Tip> torch_dtype (`str` or `torch.dtype`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model (`torch.float16`, `torch.bfloat16`, ... or `"auto"`). trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom code defined on the Hub in their own modeling, configuration, tokenization or even pipeline files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. model_kwargs (`Dict[str, Any]`, *optional*): Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the specific pipeline init (see the documentation for the corresponding pipeline class for possible values). Returns: [`Pipeline`]: A suitable pipeline for the task. Examples: ```python >>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer >>> # Sentiment analysis pipeline >>> analyzer = pipeline("sentiment-analysis") >>> # Question answering pipeline, specifying the checkpoint identifier >>> oracle = pipeline( ... "question-answering", model="distilbert-base-cased-distilled-squad", tokenizer="bert-base-cased" ... 
) >>> # Named entity recognition pipeline, passing in a specific model and tokenizer >>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> recognizer = pipeline("ner", model=model, tokenizer=tokenizer) ```""" if model_kwargs is None: model_kwargs = {} # Make sure we only pass use_auth_token once as a kwarg (it used to be possible to pass it in model_kwargs, # this is to keep BC). use_auth_token = model_kwargs.pop("use_auth_token", use_auth_token) hub_kwargs = { "revision": revision, "use_auth_token": use_auth_token, "trust_remote_code": trust_remote_code, "_commit_hash": None, } if task is None and model is None: raise RuntimeError( "Impossible to instantiate a pipeline without either a task or a model " "being specified. " "Please provide a task class or a model" ) if model is None and tokenizer is not None: raise RuntimeError( "Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer" " may not be compatible with the default model. Please provide a PreTrainedModel class or a" " path/identifier to a pretrained model when providing tokenizer." ) if model is None and feature_extractor is not None: raise RuntimeError( "Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided" " feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class" " or a path/identifier to a pretrained model when providing feature_extractor." ) if isinstance(model, Path): model = str(model) # Config is the primordial information item. # Instantiate config if needed if isinstance(config, str): config = AutoConfig.from_pretrained(config, _from_pipeline=task, **hub_kwargs, **model_kwargs) hub_kwargs["_commit_hash"] = config._commit_hash elif config is None and isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs) hub_kwargs["_commit_hash"] = config._commit_hash custom_tasks = {} if config is not None and len(getattr(config, "custom_pipelines", {})) > 0: custom_tasks = config.custom_pipelines if task is None and trust_remote_code is not False: if len(custom_tasks) == 1: task = list(custom_tasks.keys())[0] else: raise RuntimeError( "We can't infer the task automatically for this model as there are multiple tasks available. Pick " f"one in {', '.join(custom_tasks.keys())}" ) if task is None and model is not None: if not isinstance(model, str): raise RuntimeError( "Inferring the task automatically requires to check the hub with a model_id defined as a `str`." f"{model} is not a valid model_id." ) task = get_task(model, use_auth_token) # Retrieve the task if task in custom_tasks: normalized_task = task targeted_task, task_options = clean_custom_task(custom_tasks[task]) if pipeline_class is None: if not trust_remote_code: raise ValueError( "Loading this pipeline requires you to execute the code in the pipeline file in that" " repo on your local machine. Make sure you have read the code there to avoid malicious use, then" " set the option `trust_remote_code=True` to remove this error." 
) class_ref = targeted_task["impl"] pipeline_class = get_class_from_dynamic_module( class_ref, model, revision=revision, use_auth_token=use_auth_token ) else: normalized_task, targeted_task, task_options = check_task(task) if pipeline_class is None: pipeline_class = targeted_task["impl"] # Use default model/config/tokenizer for the task if no model is provided if model is None: # At that point framework might still be undetermined model, default_revision = get_default_model_and_revision(targeted_task, framework, task_options) revision = revision if revision is not None else default_revision logger.warning( f"No model was supplied, defaulted to {model} and revision" f" {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).\n" "Using a pipeline without specifying a model name and revision in production is not recommended." ) if config is None and isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs) hub_kwargs["_commit_hash"] = config._commit_hash if device_map is not None: if "device_map" in model_kwargs: raise ValueError( 'You cannot use both `pipeline(... device_map=..., model_kwargs={"device_map":...})` as those' " arguments might conflict, use only one.)" ) if device is not None: logger.warning( "Both `device` and `device_map` are specified. `device` will override `device_map`. You" " will most likely encounter unexpected behavior. Please remove `device` and keep `device_map`." ) model_kwargs["device_map"] = device_map if torch_dtype is not None: if "torch_dtype" in model_kwargs: raise ValueError( 'You cannot use both `pipeline(... torch_dtype=..., model_kwargs={"torch_dtype":...})` as those' " arguments might conflict, use only one.)" ) model_kwargs["torch_dtype"] = torch_dtype model_name = model if isinstance(model, str) else None # Load the correct model if possible # Infer the framework from the model if not already defined if isinstance(model, str) or framework is None: model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]} framework, model = infer_framework_load_model( model, model_classes=model_classes, config=config, framework=framework, task=task, **hub_kwargs, **model_kwargs, ) model_config = model.config hub_kwargs["_commit_hash"] = model.config._commit_hash load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None load_image_processor = type(model_config) in IMAGE_PROCESSOR_MAPPING or image_processor is not None # If `model` (instance of `PretrainedModel` instead of `str`) is passed (and/or same for config), while # `image_processor` or `feature_extractor` is `None`, the loading will fail. This happens particularly for some # vision tasks when calling `pipeline()` with `model` and only one of the `image_processor` and `feature_extractor`. # TODO: we need to make `NO_IMAGE_PROCESSOR_TASKS` and `NO_FEATURE_EXTRACTOR_TASKS` more robust to avoid such issue. # This block is only temporarily to make CI green. if load_image_processor and load_feature_extractor: load_feature_extractor = False if ( tokenizer is None and not load_tokenizer and normalized_task not in NO_TOKENIZER_TASKS # Using class name to avoid importing the real class. 
and model_config.__class__.__name__ in MULTI_MODEL_CONFIGS ): # This is a special category of models, that are fusions of multiple models # so the model_config might not define a tokenizer, but it seems to be # necessary for the task, so we're force-trying to load it. load_tokenizer = True if ( image_processor is None and not load_image_processor and normalized_task not in NO_IMAGE_PROCESSOR_TASKS # Using class name to avoid importing the real class. and model_config.__class__.__name__ in MULTI_MODEL_CONFIGS and normalized_task != "automatic-speech-recognition" ): # This is a special category of models, that are fusions of multiple models # so the model_config might not define a tokenizer, but it seems to be # necessary for the task, so we're force-trying to load it. load_image_processor = True if ( feature_extractor is None and not load_feature_extractor and normalized_task not in NO_FEATURE_EXTRACTOR_TASKS # Using class name to avoid importing the real class. and model_config.__class__.__name__ in MULTI_MODEL_CONFIGS ): # This is a special category of models, that are fusions of multiple models # so the model_config might not define a tokenizer, but it seems to be # necessary for the task, so we're force-trying to load it. load_feature_extractor = True if task in NO_TOKENIZER_TASKS: # These will never require a tokenizer. # the model on the other hand might have a tokenizer, but # the files could be missing from the hub, instead of failing # on such repos, we just force to not load it. load_tokenizer = False if task in NO_FEATURE_EXTRACTOR_TASKS: load_feature_extractor = False if task in NO_IMAGE_PROCESSOR_TASKS: load_image_processor = False if load_tokenizer: # Try to infer tokenizer from model or config name (if provided as str) if tokenizer is None: if isinstance(model_name, str): tokenizer = model_name elif isinstance(config, str): tokenizer = config else: # Impossible to guess what is the right tokenizer here raise Exception( "Impossible to guess which tokenizer to use. " "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer." ) # Instantiate tokenizer if needed if isinstance(tokenizer, (str, tuple)): if isinstance(tokenizer, tuple): # For tuple we have (tokenizer name, {kwargs}) use_fast = tokenizer[1].pop("use_fast", use_fast) tokenizer_identifier = tokenizer[0] tokenizer_kwargs = tokenizer[1] else: tokenizer_identifier = tokenizer tokenizer_kwargs = model_kwargs.copy() tokenizer_kwargs.pop("torch_dtype", None) tokenizer = AutoTokenizer.from_pretrained( tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs ) if load_image_processor: # Try to infer image processor from model or config name (if provided as str) if image_processor is None: if isinstance(model_name, str): image_processor = model_name elif isinstance(config, str): image_processor = config # Backward compatibility, as `feature_extractor` used to be the name # for `ImageProcessor`. elif feature_extractor is not None and isinstance(feature_extractor, BaseImageProcessor): image_processor = feature_extractor else: # Impossible to guess what is the right image_processor here raise Exception( "Impossible to guess which image processor to use. " "Please provide a PreTrainedImageProcessor class or a path/identifier " "to a pretrained image processor." 
) # Instantiate image_processor if needed if isinstance(image_processor, (str, tuple)): image_processor = AutoImageProcessor.from_pretrained( image_processor, _from_pipeline=task, **hub_kwargs, **model_kwargs ) if load_feature_extractor: # Try to infer feature extractor from model or config name (if provided as str) if feature_extractor is None: if isinstance(model_name, str): feature_extractor = model_name elif isinstance(config, str): feature_extractor = config else: # Impossible to guess what is the right feature_extractor here raise Exception( "Impossible to guess which feature extractor to use. " "Please provide a PreTrainedFeatureExtractor class or a path/identifier " "to a pretrained feature extractor." ) # Instantiate feature_extractor if needed if isinstance(feature_extractor, (str, tuple)): feature_extractor = AutoFeatureExtractor.from_pretrained( feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs ) if ( feature_extractor._processor_class and feature_extractor._processor_class.endswith("WithLM") and isinstance(model_name, str) ): try: import kenlm # to trigger `ImportError` if not installed from pyctcdecode import BeamSearchDecoderCTC if os.path.isdir(model_name) or os.path.isfile(model_name): decoder = BeamSearchDecoderCTC.load_from_dir(model_name) else: language_model_glob = os.path.join( BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*" ) alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME allow_patterns = [language_model_glob, alphabet_filename] decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_patterns=allow_patterns) kwargs["decoder"] = decoder except ImportError as e: logger.warning(f"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Error: {e}") if not is_kenlm_available(): logger.warning("Try to install `kenlm`: `pip install kenlm") if not is_pyctcdecode_available(): logger.warning("Try to install `pyctcdecode`: `pip install pyctcdecode") if task == "translation" and model.config.task_specific_params: for key in model.config.task_specific_params: if key.startswith("translation"): task = key warnings.warn( f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"', UserWarning, ) break if tokenizer is not None: kwargs["tokenizer"] = tokenizer if feature_extractor is not None: kwargs["feature_extractor"] = feature_extractor if torch_dtype is not None: kwargs["torch_dtype"] = torch_dtype if image_processor is not None: kwargs["image_processor"] = image_processor if device is not None: kwargs["device"] = device return pipeline_class(model=model, framework=framework, task=task, **kwargs)
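# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal example of calling the `pipeline()` factory defined above with an explicitly
# pinned model and revision, since the factory warns against relying on task defaults in
# production. The checkpoint/revision pair is the depth-estimation default listed in
# SUPPORTED_TASKS above; the image URL is only an assumed example input.
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline(
        task="depth-estimation",
        model="Intel/dpt-large",
        revision="e93beec",
        device=-1,  # CPU; pass a CUDA ordinal such as 0 to run on GPU
    )
    prediction = depth_estimator("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    print(sorted(prediction.keys()))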
46,315
45.831143
120
py
transformers
transformers-main/src/transformers/pipelines/audio_utils.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read raw microphone data.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read audio from the microphone through ffmpeg. This will output `partial` overlapping
    chunks starting from `stream_chunk_s` (if it is defined) until `chunk_length_s` is reached. It will make use of
    striding to avoid errors on the "sides" of the various chunks.

    Arguments:
        sampling_rate (`int`):
            The sampling_rate to use when reading the data from the microphone. Try using the model's sampling_rate to
            avoid resampling later.
        chunk_length_s (`float` or `int`):
            The length of the maximum chunk of audio to be returned. This includes the eventual striding.
        stream_chunk_s (`float` or `int`):
            The length of the minimal temporary audio to be returned.
        stride_length_s (`float` or `int` or `(float, float)`, *optional*, defaults to `None`):
            The length of the striding to be used. Stride is used to provide context to a model on the (left, right)
            of an audio sample but without using that part to actually make the prediction. Setting this does not
            change the length of the chunk.
        format_for_conversion (`str`, defaults to `f32le`):
            The name of the format of the audio samples to be returned by ffmpeg. The standard is `f32le`, `s16le`
            could also be used.
    Return:
        A generator yielding dictionaries of the following form

        `{"sampling_rate": int, "raw": np.array(), "partial": bool}` With optionally a `"stride": (int, int)` key if
        `stride_length_s` is defined.
`stride` and `raw` are all expressed in `samples`, and `partial` is a boolean saying if the current yield item is a whole chunk, or a partial temporary result to be later replaced by another larger chunk. """ if stream_chunk_s is not None: chunk_s = stream_chunk_s else: chunk_s = chunk_length_s microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion) if format_for_conversion == "s16le": dtype = np.int16 size_of_sample = 2 elif format_for_conversion == "f32le": dtype = np.float32 size_of_sample = 4 else: raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`") if stride_length_s is None: stride_length_s = chunk_length_s / 6 chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample if isinstance(stride_length_s, (int, float)): stride_length_s = [stride_length_s, stride_length_s] stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample audio_time = datetime.datetime.now() delta = datetime.timedelta(seconds=chunk_s) for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True): # Put everything back in numpy scale item["raw"] = np.frombuffer(item["raw"], dtype=dtype) item["stride"] = ( item["stride"][0] // size_of_sample, item["stride"][1] // size_of_sample, ) item["sampling_rate"] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False): """ Reads raw bytes from an iterator and does chunks of length `chunk_len`. Optionally adds `stride` to each chunks to get overlaps. `stream` is used to return partial results even if a full `chunk_len` is not yet available. """ acc = b"" stride_left, stride_right = stride if stride_left + stride_right >= chunk_len: raise ValueError( f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" ) _stride_left = 0 for raw in iterator: acc += raw if stream and len(acc) < chunk_len: stride = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(acc) >= chunk_len: # We are flushing the accumulator stride = (_stride_left, stride_right) item = {"raw": acc[:chunk_len], "stride": stride} if stream: item["partial"] = False yield item _stride_left = stride_left acc = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(acc) > stride_left: item = {"raw": acc, "stride": (_stride_left, 0)} if stream: item["partial"] = False yield item def _ffmpeg_stream(ffmpeg_command, buflen: int): """ Internal function to create the generator of data through ffmpeg """ bufsize = 2**24 # 16Mo try: with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process: while True: raw = ffmpeg_process.stdout.read(buflen) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
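# --- Usage sketch (added for illustration; not part of the original module) ---
# Streams microphone audio with the helpers above and prints the shape of each yielded
# chunk. Assumes a working `ffmpeg` install and a default capture device; 16 kHz is an
# assumed sampling rate (pick the model's own rate when feeding a speech model).
if __name__ == "__main__":
    for chunk in ffmpeg_microphone_live(
        sampling_rate=16000,
        chunk_length_s=5.0,
        stream_chunk_s=1.0,  # emit a partial result roughly every second
    ):
        # `partial` chunks are progressively superseded by larger ones (see chunk_bytes_iter).
        print(chunk["raw"].shape, chunk["sampling_rate"], chunk["partial"], chunk.get("stride"))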
8,094
34.977778
119
py
transformers
transformers-main/src/transformers/pipelines/visual_question_answering.py
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING logger = logging.get_logger(__name__) @add_end_docstrings(PIPELINE_INIT_ARGS) class VisualQuestionAnsweringPipeline(Pipeline): """ Visual Question Answering pipeline using a `AutoModelForVisualQuestionAnswering`. This pipeline is currently only available in PyTorch. Example: ```python >>> from transformers import pipeline >>> oracle = pipeline(model="dandelin/vilt-b32-finetuned-vqa") >>> image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png" >>> oracle(question="What is she wearing ?", image=image_url) [{'score': 0.948, 'answer': 'hat'}, {'score': 0.009, 'answer': 'fedora'}, {'score': 0.003, 'answer': 'clothes'}, {'score': 0.003, 'answer': 'sun hat'}, {'score': 0.002, 'answer': 'nothing'}] >>> oracle(question="What is she wearing ?", image=image_url, top_k=1) [{'score': 0.948, 'answer': 'hat'}] >>> oracle(question="Is this a person ?", image=image_url, top_k=1) [{'score': 0.993, 'answer': 'yes'}] >>> oracle(question="Is this a man ?", image=image_url, top_k=1) [{'score': 0.996, 'answer': 'no'}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This visual question answering pipeline can currently be loaded from [`pipeline`] using the following task identifiers: `"visual-question-answering", "vqa"`. The models that this pipeline can use are models that have been fine-tuned on a visual question answering task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=visual-question-answering). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING) def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs): preprocess_params, postprocess_params = {}, {} if padding is not None: preprocess_params["padding"] = padding if truncation is not None: preprocess_params["truncation"] = truncation if top_k is not None: postprocess_params["top_k"] = top_k return preprocess_params, {}, postprocess_params def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs): r""" Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed below: - `pipeline(image=image, question=question)` - `pipeline({"image": image, "question": question})` - `pipeline([{"image": image, "question": question}])` - `pipeline([{"image": image, "question": question}, {"image": image, "question": question}])` Args: image (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. If given a single image, it can be broadcasted to multiple questions. question (`str`, `List[str]`): The question(s) asked. If given a single question, it can be broadcasted to multiple images. top_k (`int`, *optional*, defaults to 5): The number of top labels that will be returned by the pipeline. 
If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels. Return: A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys: - **label** (`str`) -- The label identified by the model. - **score** (`int`) -- The score attributed by the model for that label. """ if isinstance(image, (Image.Image, str)) and isinstance(question, str): inputs = {"image": image, "question": question} else: """ Supports the following format - {"image": image, "question": question} - [{"image": image, "question": question}] - Generator and datasets """ inputs = image results = super().__call__(inputs, **kwargs) return results def preprocess(self, inputs, padding=False, truncation=False): image = load_image(inputs["image"]) model_inputs = self.tokenizer( inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation ) image_features = self.image_processor(images=image, return_tensors=self.framework) model_inputs.update(image_features) return model_inputs def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) return model_outputs def postprocess(self, model_outputs, top_k=5): if top_k > self.model.config.num_labels: top_k = self.model.config.num_labels if self.framework == "pt": probs = model_outputs.logits.sigmoid()[0] scores, ids = probs.topk(top_k) else: raise ValueError(f"Unsupported framework: {self.framework}") scores = scores.tolist() ids = ids.tolist() return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
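# --- Usage sketch (added for illustration; not part of the original module) ---
# Demonstrates the dict/list input formats accepted by __call__ above, reusing the
# checkpoint and image URL from the class docstring.
if __name__ == "__main__":
    from transformers import pipeline

    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"
    batch = [
        {"image": image_url, "question": "What is she wearing ?"},
        {"image": image_url, "question": "Is this a person ?"},
    ]
    for answers in vqa(batch, top_k=2):
        print(answers)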
5,968
42.253623
194
py
transformers
transformers-main/src/transformers/pipelines/text_generation.py
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class ReturnType(enum.Enum): TENSORS = 0 NEW_TEXT = 1 FULL_TEXT = 2 @add_end_docstrings(PIPELINE_INIT_ARGS) class TextGenerationPipeline(Pipeline): """ Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a specified text prompt. Example: ```python >>> from transformers import pipeline >>> generator = pipeline(model="gpt2") >>> generator("I can't believe you did such a ", do_sample=False) [{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}] >>> # These parameters will return suggestions, and only the newly created text making it easier for prompting suggestions. >>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False) ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"text-generation"`. The models that this pipeline can use are models that have been trained with an autoregressive language modeling objective, which includes the uni-directional models in the library (e.g. gpt2). See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-generation). """ # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia # in https://github.com/rusiaaman/XLNet-gen#methodology # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e XL_PREFIX = """ In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. prefix = None if self.model.config.prefix is not None: prefix = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. prefix = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params) self._preprocess_params = {**self._preprocess_params, **preprocess_params} self._forward_params = {**self._forward_params, **forward_params} def _sanitize_parameters( self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, **generate_kwargs, ): preprocess_params = {} if prefix is not None: preprocess_params["prefix"] = prefix if prefix: prefix_inputs = self.tokenizer( prefix, padding=False, add_special_tokens=False, return_tensors=self.framework ) generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" " [None, 'hole']" ) preprocess_params["handle_long_generation"] = handle_long_generation preprocess_params.update(generate_kwargs) forward_params = generate_kwargs postprocess_params = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError("`return_text` is mutually exclusive with `return_full_text`") if return_tensors is not None: raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`") return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError("`return_text` is mutually exclusive with `return_tensors`") return_type = ReturnType.TENSORS if return_type is not None: postprocess_params["return_type"] = return_type if clean_up_tokenization_spaces is not None: postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces if stop_sequence is not None: stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False) if len(stop_sequence_ids) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." ) generate_kwargs["eos_token_id"] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments def _parse_and_tokenize(self, *args, **kwargs): """ Parse arguments and tokenize """ # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({"add_space_before_punct_symbol": True}) return super()._parse_and_tokenize(*args, **kwargs) def __call__(self, text_inputs, **kwargs): """ Complete the prompt(s) given as inputs. Args: args (`str` or `List[str]`): One or several prompts (or one list of prompts) to complete. return_tensors (`bool`, *optional*, defaults to `False`): Whether or not to return the tensors of predictions (as token indices) in the outputs. If set to `True`, the decoded text is not returned. return_text (`bool`, *optional*, defaults to `True`): Whether or not to return the decoded texts in the outputs. return_full_text (`bool`, *optional*, defaults to `True`): If set to `False` only added text is returned, otherwise the full text is returned. Only meaningful if *return_text* is set to True. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the potential extra spaces in the text output. 
            prefix (`str`, *optional*):
                Prefix added to prompt.
            handle_long_generation (`str`, *optional*):
                By default, this pipeline does not handle long generation (ones that exceed in one form or the other
                the model maximum length). There is no perfect way to address this (more info
                :https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides common
                strategies to work around that problem depending on your use case.

                - `None` : default strategy where nothing in particular happens
                - `"hole"`: Truncates left of input, and leaves a gap wide enough to let generation happen (might
                  truncate a lot of the prompt and not suitable when generation exceeds the model capacity)

            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./model#generative-models)).

        Return:
            A list or a list of list of `dict`: Returns one of the following dictionaries (cannot return a combination
            of both `generated_text` and `generated_token_ids`):

            - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
            - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the generated text.
        """
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
prefix_length = generate_kwargs.pop("prefix_length", 0) if prefix_length > 0: has_max_new_tokens = "max_new_tokens" in generate_kwargs or ( "generation_config" in generate_kwargs and generate_kwargs["generation_config"].max_new_tokens is not None ) if not has_max_new_tokens: generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length generate_kwargs["max_length"] += prefix_length has_min_new_tokens = "min_new_tokens" in generate_kwargs or ( "generation_config" in generate_kwargs and generate_kwargs["generation_config"].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs) out_b = generated_sequence.shape[0] if self.framework == "pt": generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:]) elif self.framework == "tf": generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:])) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True): generated_sequence = model_outputs["generated_sequence"][0] input_ids = model_outputs["input_ids"] prompt_text = model_outputs["prompt_text"] generated_sequence = generated_sequence.numpy().tolist() records = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: record = {"generated_token_ids": sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text text = self.tokenizer.decode( sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: prompt_length = 0 else: prompt_length = len( self.tokenizer.decode( input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) ) if return_type == ReturnType.FULL_TEXT: all_text = prompt_text + text[prompt_length:] else: all_text = text[prompt_length:] record = {"generated_text": all_text} records.append(record) return records
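# --- Usage sketch (added for illustration; not part of the original module) ---
# Exercises a few of the parameters documented above: `return_full_text` and
# `handle_long_generation="hole"`. The checkpoint is the one from the class docstring.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    outputs = generator(
        "My tart needs some",
        max_new_tokens=20,
        do_sample=False,
        return_full_text=False,  # only return the newly generated continuation
        handle_long_generation="hole",  # left-truncate the prompt if it would overflow the model
    )
    print(outputs[0]["generated_text"])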
15,205
48.530945
176
py
transformers
transformers-main/src/transformers/pipelines/feature_extraction.py
from typing import Dict

from .base import GenericTensor, Pipeline


# Can't use @add_end_docstrings(PIPELINE_INIT_ARGS) here because this one does not accept `binary_output`
class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base
    transformer, which can be used as features in downstream tasks.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> extractor = pipeline(model="bert-base-uncased", task="feature-extraction")
    >>> result = extractor("This is a simple test.", return_tensors=True)
    >>> result.shape  # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input string.
    torch.Size([1, 8, 768])
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
    `"feature-extraction"`.

    All models may be used for this pipeline. See a list of all models, including community-contributed models on
    [huggingface.co/models](https://huggingface.co/models).

    Arguments:
        model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.
        tokenizer ([`PreTrainedTokenizer`]):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            [`PreTrainedTokenizer`].
        modelcard (`str` or [`ModelCard`], *optional*):
            Model card attributed to the model for this pipeline.
        framework (`str`, *optional*):
            The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
            installed.

            If no framework is specified, will default to the one currently installed. If no framework is specified
            and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model
            is provided.
        return_tensors (`bool`, *optional*):
            If `True`, returns a tensor according to the specified framework, otherwise returns a list.
        task (`str`, defaults to `""`):
            A task-identifier for the pipeline.
        args_parser ([`~pipelines.ArgumentHandler`], *optional*):
            Reference to the object in charge of parsing supplied pipeline parameters.
        device (`int`, *optional*, defaults to -1):
            Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model
            on the associated CUDA device id.
        tokenize_kwargs (`dict`, *optional*):
            Additional dictionary of keyword arguments passed along to the tokenizer.
""" def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs): if tokenize_kwargs is None: tokenize_kwargs = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" ) tokenize_kwargs["truncation"] = truncation preprocess_params = tokenize_kwargs postprocess_params = {} if return_tensors is not None: postprocess_params["return_tensors"] = return_tensors return preprocess_params, {}, postprocess_params def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]: return_tensors = self.framework model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs) return model_inputs def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) return model_outputs def postprocess(self, model_outputs, return_tensors=False): # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__(self, *args, **kwargs): """ Extract the features of the input(s). Args: args (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of. Return: A nested list of `float`: The features computed by the model. """ return super().__call__(*args, **kwargs)
4,822
43.657407
119
py
transformers
transformers-main/src/transformers/pipelines/image_classification.py
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING logger = logging.get_logger(__name__) @add_end_docstrings(PIPELINE_INIT_ARGS) class ImageClassificationPipeline(Pipeline): """ Image classification pipeline using any `AutoModelForImageClassification`. This pipeline predicts the class of an image. Example: ```python >>> from transformers import pipeline >>> classifier = pipeline(model="microsoft/beit-base-patch16-224-pt22k-ft22k") >>> classifier("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") [{'score': 0.442, 'label': 'macaw'}, {'score': 0.088, 'label': 'popinjay'}, {'score': 0.075, 'label': 'parrot'}, {'score': 0.073, 'label': 'parodist, lampooner'}, {'score': 0.046, 'label': 'poll, poll_parrot'}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This image classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"image-classification"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=image-classification). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "vision") self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def _sanitize_parameters(self, top_k=None): postprocess_params = {} if top_k is not None: postprocess_params["top_k"] = top_k return {}, {}, postprocess_params def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs): """ Assign labels to the image(s) passed as inputs. Args: images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images, which must then be passed as a string. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. top_k (`int`, *optional*, defaults to 5): The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels. Return: A dictionary or a list of dictionaries containing result. If the input is a single image, will return a dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to the images. The dictionaries contain the following keys: - **label** (`str`) -- The label identified by the model. - **score** (`int`) -- The score attributed by the model for that label. 
""" return super().__call__(images, **kwargs) def preprocess(self, image): image = load_image(image) model_inputs = self.image_processor(images=image, return_tensors=self.framework) return model_inputs def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) return model_outputs def postprocess(self, model_outputs, top_k=5): if top_k > self.model.config.num_labels: top_k = self.model.config.num_labels if self.framework == "pt": probs = model_outputs.logits.softmax(-1)[0] scores, ids = probs.topk(top_k) elif self.framework == "tf": probs = stable_softmax(model_outputs.logits, axis=-1)[0] topk = tf.math.top_k(probs, k=top_k) scores, ids = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f"Unsupported framework: {self.framework}") scores = scores.tolist() ids = ids.tolist() return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
4,940
37.601563
214
py
transformers
transformers-main/src/transformers/pipelines/zero_shot_audio_classification.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline logger = logging.get_logger(__name__) @add_end_docstrings(PIPELINE_INIT_ARGS) class ZeroShotAudioClassificationPipeline(Pipeline): """ Zero shot audio classification pipeline using `ClapModel`. This pipeline predicts the class of an audio when you provide an audio and a set of `candidate_labels`. Example: ```python >>> from transformers import pipeline >>> from datasets import load_dataset >>> dataset = load_dataset("ashraq/esc50") >>> audio = next(iter(dataset["train"]["audio"]))["array"] >>> classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused") >>> classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) [{'score': 0.9996, 'label': 'Sound of a dog'}, {'score': 0.0004, 'label': 'Sound of vaccum cleaner'}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This audio classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"zero-shot-audio-classification"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-audio-classification). """ def __init__(self, **kwargs): super().__init__(**kwargs) if self.framework != "pt": raise ValueError(f"The {self.__class__} is only available in PyTorch.") # No specific FOR_XXX available yet def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs): """ Assign labels to the audio(s) passed as inputs. Args: audios (`str`, `List[str]`, `np.array` or `List[np.array]`): The pipeline handles three types of inputs: - A string containing a http link pointing to an audio - A string containing a local path to an audio - An audio loaded in numpy candidate_labels (`List[str]`): The candidate labels for this audio hypothesis_template (`str`, *optional*, defaults to `"This is a sound of {}"`): The sentence used in cunjunction with *candidate_labels* to attempt the audio classification by replacing the placeholder with the candidate_labels. Then likelihood is estimated by using logits_per_audio Return: A list of dictionaries containing result, one dictionary per proposed label. The dictionaries contain the following keys: - **label** (`str`) -- The label identified by the model. It is one of the suggested `candidate_label`. - **score** (`float`) -- The score attributed by the model for that label (between 0 and 1). 
""" return super().__call__(audios, **kwargs) def _sanitize_parameters(self, **kwargs): preprocess_params = {} if "candidate_labels" in kwargs: preprocess_params["candidate_labels"] = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"] return preprocess_params, {}, {} def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."): if isinstance(audio, str): if audio.startswith("http://") or audio.startswith("https://"): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png audio = requests.get(audio).content else: with open(audio, "rb") as f: audio = f.read() if isinstance(audio, bytes): audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate) if not isinstance(audio, np.ndarray): raise ValueError("We expect a numpy ndarray as input") if len(audio.shape) != 1: raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline") inputs = self.feature_extractor( [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" ) inputs["candidate_labels"] = candidate_labels sequences = [hypothesis_template.format(x) for x in candidate_labels] text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True) inputs["text_inputs"] = [text_inputs] return inputs def _forward(self, model_inputs): candidate_labels = model_inputs.pop("candidate_labels") text_inputs = model_inputs.pop("text_inputs") if isinstance(text_inputs[0], UserDict): text_inputs = text_inputs[0] else: # Batching case. text_inputs = text_inputs[0][0] outputs = self.model(**text_inputs, **model_inputs) model_outputs = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_audio, } return model_outputs def postprocess(self, model_outputs): candidate_labels = model_outputs.pop("candidate_labels") logits = model_outputs["logits"][0] if self.framework == "pt": probs = logits.softmax(dim=0) scores = probs.tolist() else: raise ValueError("`tf` framework not supported.") result = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0]) ] return result
6501
40.679487
117
py
transformers
transformers-main/src/transformers/generation/flax_utils.py
# coding=utf-8 # Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team. # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import warnings from functools import partial from typing import Any, Dict, Optional, Union import flax import jax import jax.numpy as jnp import numpy as np from jax import lax from ..models.auto import ( FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, ) from ..utils import ModelOutput, logging from .configuration_utils import GenerationConfig from .flax_logits_process import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxForceTokensLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxSuppressTokensAtBeginLogitsProcessor, FlaxSuppressTokensLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) logger = logging.get_logger(__name__) @flax.struct.dataclass class FlaxGreedySearchOutput(ModelOutput): """ Flax Base class for outputs of decoder-only generation models using greedy search. Args: sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): The generated sequences. """ sequences: jnp.ndarray = None @flax.struct.dataclass class FlaxSampleOutput(ModelOutput): """ Flax Base class for outputs of decoder-only generation models using sampling. Args: sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): The generated sequences. """ sequences: jnp.ndarray = None @flax.struct.dataclass class FlaxBeamSearchOutput(ModelOutput): """ Flax Base class for outputs of decoder-only generation models using greedy search. Args: sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): The generated sequences. scores (`jnp.ndarray` of shape `(batch_size,)`): The scores (log probabilities) of the generated sequences. """ sequences: jnp.ndarray = None scores: jnp.ndarray = None @flax.struct.dataclass class GreedyState: cur_len: jnp.ndarray sequences: jnp.ndarray running_token: jnp.ndarray is_sent_finished: jnp.ndarray model_kwargs: Dict[str, jnp.ndarray] @flax.struct.dataclass class SampleState: cur_len: jnp.ndarray sequences: jnp.ndarray running_token: jnp.ndarray is_sent_finished: jnp.ndarray prng_key: jnp.ndarray model_kwargs: Dict[str, jnp.ndarray] @flax.struct.dataclass class BeamSearchState: cur_len: jnp.ndarray running_sequences: jnp.ndarray running_scores: jnp.ndarray sequences: jnp.ndarray scores: jnp.ndarray is_sent_finished: jnp.ndarray model_kwargs: Dict[str, jnp.ndarray] class FlaxGenerationMixin: """ A class containing all functions for auto-regressive text generation, to be used as a mixin in [`FlaxPreTrainedModel`]. 
The class exposes [`~generation.FlaxGenerationMixin.generate`], which can be used for: - *greedy decoding* by calling [`~generation.FlaxGenerationMixin._greedy_search`] if `num_beams=1` and `do_sample=False` - *multinomial sampling* by calling [`~generation.FlaxGenerationMixin._sample`] if `num_beams=1` and `do_sample=True` - *beam-search decoding* by calling [`~generation.FlaxGenerationMixin._beam_search`] if `num_beams>1` and `do_sample=False` You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). """ def prepare_inputs_for_generation(self, *args, **kwargs): raise NotImplementedError( "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`." ) @staticmethod def _run_loop_in_debug(cond_fn, body_fn, init_state): """ Run generation in untraced mode. This should only be used for debugging purposes. """ state = init_state while cond_fn(state): state = body_fn(state) return state def _prepare_encoder_decoder_kwargs_for_generation(self, input_ids, params, model_kwargs): encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not (argument.startswith("decoder_") or argument.startswith("cross_attn")) } model_kwargs["encoder_outputs"] = self.encode(input_ids, params=params, return_dict=True, **encoder_kwargs) return model_kwargs def _prepare_decoder_input_ids_for_generation( self, batch_size: int, decoder_start_token_id: int = None, bos_token_id: int = None, model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, ) -> jnp.ndarray: if model_kwargs is not None and "decoder_input_ids" in model_kwargs: # Only use this arg if not None, otherwise just remove from model_kwargs decoder_input_ids = model_kwargs.pop("decoder_input_ids") if decoder_input_ids is not None: return decoder_input_ids decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) return jnp.array(decoder_start_token_id, dtype="i4").reshape(1, -1).repeat(batch_size, axis=0) def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: # retrieve decoder_start_token_id for encoder-decoder models # fall back to bos_token_id if necessary decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.generation_config.decoder_start_token_id ) bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "decoder_start_token_id") and self.config.decoder.decoder_start_token_id is not None ): return self.config.decoder.decoder_start_token_id elif bos_token_id is not None: return bos_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "bos_token_id") and self.config.decoder.bos_token_id is not None ): return self.config.decoder.bos_token_id raise ValueError( "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." ) @staticmethod def _expand_to_num_beams(tensor, num_beams): return jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:]) def _adapt_logits_for_beam_search(self, logits): """ This function can be overwritten in the specific modeling_flax_<model-name>.py classes to allow for custom beam search behavior. 
Note that the only model that overwrites this method is [`~transformes.FlaxMarianMTModel`]. """ return logits def _validate_model_class(self): """ Confirms that the model class is compatible with generation. If not, raises an exception that points to the right class to use. """ if not self.can_generate(): generate_compatible_mappings = [ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, ] generate_compatible_classes = set() for model_mapping in generate_compatible_mappings: supported_models = model_mapping.get(type(self.config), default=None) if supported_models is not None: generate_compatible_classes.add(supported_models.__name__) exception_message = ( f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " "it doesn't have a language model head." ) if generate_compatible_classes: exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" raise TypeError(exception_message) def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): """Validates model kwargs for generation. Generate argument typos will also be caught here.""" unused_model_args = [] model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;) if "kwargs" in model_args or "model_kwargs" in model_args: model_args |= set(inspect.signature(self.__call__).parameters) for key, value in model_kwargs.items(): if value is not None and key not in model_args: unused_model_args.append(key) if unused_model_args: raise ValueError( f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" " generate arguments will also show up in this list)" ) def generate( self, input_ids: jnp.ndarray, generation_config: Optional[GenerationConfig] = None, prng_key: Optional[jnp.ndarray] = None, trace: bool = True, params: Optional[Dict[str, jnp.ndarray]] = None, logits_processor: Optional[FlaxLogitsProcessorList] = None, **kwargs, ): r""" Generates sequences of token ids for models with a language modeling head. Parameters: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. trace (`bool`, *optional*, defaults to `True`): Whether to trace generation. Setting `trace=False` should only be used for debugging and will lead to a considerably slower runtime. params (`Dict[str, jnp.ndarray]`, *optional*): Optionally the model parameters can be passed. Can be useful for parallelized generation. logits_processor (`FlaxLogitsProcessorList `, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. 
If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`]. """ # Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call self._validate_model_class() # priority: `generation_config` argument > `model.generation_config` (the default generation config) if generation_config is None: # legacy: users may modify the model configuration to control generation -- update the generation config # model attribute accordingly, if it was created from the model config if self.generation_config._from_model_config: new_generation_config = GenerationConfig.from_model_config(self.config) if new_generation_config != self.generation_config: warnings.warn( "You have modified the pretrained model configuration to control generation. This is a" " deprecated strategy to control generation and will be removed soon, in a future version." " Please use a generation configuration file (see" " https://huggingface.co/docs/transformers/main_classes/text_generation )" ) self.generation_config = new_generation_config generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs generation_config.validate() self._validate_model_kwargs(model_kwargs.copy()) logits_processor = logits_processor if logits_processor is not None else FlaxLogitsProcessorList() # set init values prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0) if generation_config.pad_token_id is None and generation_config.eos_token_id is not None: if model_kwargs.get("attention_mask") is None: logger.warning( "The attention mask and the pad token id were not set. As a consequence, you may observe " "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." ) eos_token_id = generation_config.eos_token_id if isinstance(eos_token_id, list): eos_token_id = eos_token_id[0] logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") generation_config.pad_token_id = eos_token_id if generation_config.decoder_start_token_id is None and self.config.is_encoder_decoder: raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.") # decoder-only models should use left-padding for generation (can't be checked with `trace=True`) if not self.config.is_encoder_decoder and not trace: if ( generation_config.pad_token_id is not None and jnp.sum(input_ids[:, -1] == generation_config.pad_token_id) > 0 ): logger.warning( "A decoder-only architecture is being used, but right-padding was detected! For correct " "generation results, please set `padding_side='left'` when initializing the tokenizer." 
            )

        batch_size = input_ids.shape[0]

        if self.config.is_encoder_decoder:
            # add encoder_outputs to model_kwargs
            if model_kwargs.get("encoder_outputs") is None:
                model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, params, model_kwargs)
            # prepare decoder_input_ids for generation
            input_ids = self._prepare_decoder_input_ids_for_generation(
                batch_size,
                decoder_start_token_id=generation_config.decoder_start_token_id,
                bos_token_id=generation_config.bos_token_id,
                model_kwargs=model_kwargs,
            )

        # Prepare `max_length` depending on other stopping criteria.
        input_ids_seq_length = input_ids.shape[-1]
        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
        if has_default_max_length and generation_config.max_new_tokens is None:
            warnings.warn(
                f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
                "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
                " recommend using `max_new_tokens` to control the maximum length of the generation.",
                UserWarning,
            )
        elif generation_config.max_new_tokens is not None:
            if not has_default_max_length:
                logger.warning(
                    f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (="
                    f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                    "Please refer to the documentation for more information. "
                    "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
                )
            generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length

        if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
            raise ValueError(
                f"Unfeasible length constraints: the minimum length ({generation_config.min_length}) is larger than"
                f" the maximum length ({generation_config.max_length})"
            )
        if input_ids_seq_length >= generation_config.max_length:
            input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
            logger.warning(
                f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
                f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
                " increasing `max_new_tokens`."
) logits_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=input_ids_seq_length, logits_processor=logits_processor, ) if not generation_config.do_sample and generation_config.num_beams == 1: return self._greedy_search( input_ids, generation_config.max_length, generation_config.pad_token_id, generation_config.eos_token_id, logits_processor=logits_processor, trace=trace, params=params, model_kwargs=model_kwargs, ) elif generation_config.do_sample and generation_config.num_beams == 1: logits_warper = self._get_logits_warper(generation_config=generation_config) return self._sample( input_ids, generation_config.max_length, generation_config.pad_token_id, generation_config.eos_token_id, prng_key, logits_warper=logits_warper, logits_processor=logits_processor, trace=trace, params=params, model_kwargs=model_kwargs, ) elif not generation_config.do_sample and generation_config.num_beams > 1: # broadcast input_ids & encoder_outputs input_ids = self._expand_to_num_beams(input_ids, num_beams=generation_config.num_beams) if "encoder_outputs" in model_kwargs: model_kwargs["encoder_outputs"]["last_hidden_state"] = self._expand_to_num_beams( model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=generation_config.num_beams ) for kwarg in ["attention_mask", "decoder_attention_mask"]: if kwarg in model_kwargs: model_kwargs[kwarg] = self._expand_to_num_beams( model_kwargs[kwarg], num_beams=generation_config.num_beams ) return self._beam_search( input_ids, generation_config.max_length, generation_config.pad_token_id, generation_config.eos_token_id, length_penalty=generation_config.length_penalty, early_stopping=generation_config.early_stopping, logits_processor=logits_processor, trace=trace, params=params, num_return_sequences=generation_config.num_return_sequences, model_kwargs=model_kwargs, ) else: raise NotImplementedError("`Beam sampling is currently not implemented.") def _get_logits_warper(self, generation_config: GenerationConfig) -> FlaxLogitsProcessorList: """ This class returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsWarper`] instances used for multinomial sampling. """ warpers = FlaxLogitsProcessorList() if generation_config.temperature is not None and generation_config.temperature != 1.0: warpers.append(FlaxTemperatureLogitsWarper(generation_config.temperature)) if generation_config.top_k is not None and generation_config.top_k != 0: warpers.append(FlaxTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1)) if generation_config.top_p is not None and generation_config.top_p < 1.0: warpers.append(FlaxTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1)) return warpers def _get_logits_processor( self, generation_config: GenerationConfig, input_ids_seq_length: int, logits_processor: Optional[FlaxLogitsProcessorList], ) -> FlaxLogitsProcessorList: """ This class returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsProcessor`] instances used to modify the scores of the language model head. 
""" processors = FlaxLogitsProcessorList() if ( generation_config.min_length is not None and generation_config.eos_token_id is not None and generation_config.min_length > -1 ): processors.append( FlaxMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id) ) if generation_config.forced_bos_token_id is not None: processors.append(FlaxForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)) if generation_config.forced_eos_token_id is not None: processors.append( FlaxForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id) ) if generation_config.suppress_tokens is not None: processors.append(FlaxSuppressTokensLogitsProcessor(generation_config.suppress_tokens)) if generation_config.begin_suppress_tokens is not None: begin_index = input_ids_seq_length begin_index = ( begin_index if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None) else begin_index + 1 ) if generation_config.forced_decoder_ids is not None and len(generation_config.forced_decoder_ids) > 0: # generation starts after the last token that is forced begin_index += generation_config.forced_decoder_ids[-1][0] processors.append( FlaxSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index) ) if generation_config.forced_decoder_ids is not None: forced_decoder_ids = [ [input_ids_seq_length + i[0] - 1, i[1]] for i in generation_config.forced_decoder_ids ] processors.append(FlaxForceTokensLogitsProcessor(forced_decoder_ids)) processors = self._merge_criteria_processor_list(processors, logits_processor) return processors def _merge_criteria_processor_list( self, default_list: FlaxLogitsProcessorList, custom_list: FlaxLogitsProcessorList, ) -> FlaxLogitsProcessorList: if len(custom_list) == 0: return default_list for default in default_list: for custom in custom_list: if type(custom) is type(default): object_type = "logits processor" raise ValueError( f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to" f" `generate`, but it has already been created with the values {default}. {default} has been" " created by passing the corresponding arguments to generate or by the model's config default" f" values. If you just want to change the default values of {object_type} consider passing" f" them as arguments to `generate` instead of using a custom {object_type}." ) default_list.extend(custom_list) return default_list def _greedy_search( self, input_ids: None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, logits_processor: Optional[FlaxLogitsProcessorList] = None, trace: bool = True, params: Optional[Dict[str, jnp.ndarray]] = None, model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, ): # init values max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id batch_size, cur_len = input_ids.shape eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) cur_len = jnp.array(cur_len) # per batch-item holding current token in loop. 
sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32) sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0)) # per batch-item state bit indicating if sentence has finished. is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_) # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. model = self.decode if self.config.is_encoder_decoder else self # initialize model specific kwargs model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs) # initialize state state = GreedyState( cur_len=cur_len, sequences=sequences, running_token=input_ids, is_sent_finished=is_sent_finished, model_kwargs=model_kwargs, ) def greedy_search_cond_fn(state): """state termination condition fn.""" has_reached_max_length = state.cur_len == max_length all_sequence_finished = jnp.all(state.is_sent_finished) finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished) return ~finish_generation def greedy_search_body_fn(state): """state update fn.""" model_outputs = model(state.running_token, params=params, **state.model_kwargs) logits = model_outputs.logits[:, -1] # apply min_length, ... logits = logits_processor(state.sequences, logits, state.cur_len) next_token = jnp.argmax(logits, axis=-1) next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id) next_token = next_token[:, None] next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len)) next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) return GreedyState( cur_len=state.cur_len + 1, sequences=next_sequences, running_token=next_token, is_sent_finished=next_is_sent_finished, model_kwargs=next_model_kwargs, ) # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU if input_ids.shape[1] > 1: state = greedy_search_body_fn(state) if not trace: state = self._run_loop_in_debug(greedy_search_cond_fn, greedy_search_body_fn, state) else: state = lax.while_loop(greedy_search_cond_fn, greedy_search_body_fn, state) return FlaxGreedySearchOutput(sequences=state.sequences) def _sample( self, input_ids: None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, prng_key: Optional[jnp.ndarray] = None, logits_processor: Optional[FlaxLogitsProcessorList] = None, logits_warper: Optional[FlaxLogitsProcessorList] = None, trace: bool = True, params: Optional[Dict[str, jnp.ndarray]] = None, model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, ): # init values max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0) batch_size, cur_len = input_ids.shape eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) cur_len = jnp.array(cur_len) # per batch-item holding current token in loop. 
sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32) sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0)) # per batch-item state bit indicating if sentence has finished. is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_) # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. model = self.decode if self.config.is_encoder_decoder else self # initialize model specific kwargs model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs) # initialize state state = SampleState( cur_len=cur_len, sequences=sequences, running_token=input_ids, is_sent_finished=is_sent_finished, prng_key=prng_key, model_kwargs=model_kwargs, ) def sample_search_cond_fn(state): """state termination condition fn.""" has_reached_max_length = state.cur_len == max_length all_sequence_finished = jnp.all(state.is_sent_finished) finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished) return ~finish_generation def sample_search_body_fn(state): """state update fn.""" prng_key, prng_key_next = jax.random.split(state.prng_key) model_outputs = model(state.running_token, params=params, **state.model_kwargs) logits = model_outputs.logits[:, -1] # apply min_length, ... logits = logits_processor(state.sequences, logits, state.cur_len) # apply top_p, top_k, temperature logits = logits_warper(logits, logits, state.cur_len) next_token = jax.random.categorical(prng_key, logits, axis=-1) next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id) next_token = next_token * ~next_is_sent_finished + pad_token_id * next_is_sent_finished next_token = next_token[:, None] next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len)) next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) return SampleState( cur_len=state.cur_len + 1, sequences=next_sequences, running_token=next_token, is_sent_finished=next_is_sent_finished, model_kwargs=next_model_kwargs, prng_key=prng_key_next, ) # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU if input_ids.shape[1] > 1: state = sample_search_body_fn(state) if not trace: state = self._run_loop_in_debug(sample_search_cond_fn, sample_search_body_fn, state) else: state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state) return FlaxSampleOutput(sequences=state.sequences) def _beam_search( self, input_ids: None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, early_stopping: Optional[Union[bool, str]] = None, logits_processor: Optional[FlaxLogitsProcessorList] = None, trace: bool = True, params: Optional[Dict[str, jnp.ndarray]] = None, num_return_sequences: Optional[int] = None, model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, ): """ This beam search function is heavily inspired by Flax's official example: https://github.com/google/flax/blob/main/examples/wmt/decode.py """ def flatten_beam_dim(tensor): """Flattens the first two dimensions of a non-scalar array.""" # ignore scalars (e.g. 
cache index) if tensor.ndim == 0: return tensor return tensor.reshape((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:]) def unflatten_beam_dim(tensor, batch_size, num_beams): """Unflattens the first, flat batch*beam dimension of a non-scalar array.""" # ignore scalars (e.g. cache index) if tensor.ndim == 0: return tensor return tensor.reshape((batch_size, num_beams) + tensor.shape[1:]) def gather_beams(nested, beam_indices, batch_size, new_num_beams): """ Gathers the beam slices indexed by beam_indices into new beam array. """ batch_indices = jnp.reshape( jnp.arange(batch_size * new_num_beams) // new_num_beams, (batch_size, new_num_beams) ) def gather_fn(tensor): # ignore scalars (e.g. cache index) if tensor.ndim == 0: return tensor else: return tensor[batch_indices, beam_indices] return jax.tree_util.tree_map(gather_fn, nested) # init values max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences ) batch_size, num_beams, cur_len = input_ids.shape eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) cur_len = jnp.array(cur_len) # per batch,beam-item holding current token in loop. sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32) running_sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32) running_sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0, 0)) # per batch,beam-item state bit indicating if sentence has finished. is_sent_finished = jnp.zeros((batch_size, num_beams), dtype=jnp.bool_) # per batch,beam-item score, logprobs running_scores = jnp.tile(jnp.array([0.0] + [np.array(-1.0e7)] * (num_beams - 1)), [batch_size, 1]) scores = jnp.ones((batch_size, num_beams)) * np.array(-1.0e7) # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. model = self.decode if self.config.is_encoder_decoder else self # flatten beam dim if "encoder_outputs" in model_kwargs: model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim( model_kwargs["encoder_outputs"]["last_hidden_state"] ) for kwarg in ["attention_mask", "decoder_attention_mask"]: if kwarg in model_kwargs: model_kwargs[kwarg] = flatten_beam_dim(model_kwargs[kwarg]) # initialize model specific kwargs model_kwargs = self.prepare_inputs_for_generation(flatten_beam_dim(input_ids), max_length, **model_kwargs) # initialize state state = BeamSearchState( cur_len=cur_len, running_sequences=running_sequences, running_scores=running_scores, sequences=sequences, scores=scores, is_sent_finished=is_sent_finished, model_kwargs=model_kwargs, ) def beam_search_cond_fn(state): """beam search state termination condition fn.""" # 1. is less than max length? not_max_length_yet = state.cur_len < max_length # 2. can the new beams still improve? 
# early_stopping == False -> apply heuristic = always get the best score from `cur_len`. See the discussion # below for more details. # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there. if early_stopping == "never" and length_penalty > 0.0: best_running_score = state.running_scores[:, :1] / (max_length**length_penalty) else: best_running_score = state.running_scores[:, :1] / (state.cur_len**length_penalty) worst_finished_score = jnp.where( state.is_sent_finished, jnp.min(state.scores, axis=1, keepdims=True), np.array(-1.0e7) ) improvement_still_possible = jnp.any(best_running_score > worst_finished_score) # 3. is there still a beam that has not finished? still_open_beam = ~(jnp.all(state.is_sent_finished) & (early_stopping is True)) return not_max_length_yet & still_open_beam & improvement_still_possible def beam_search_body_fn(state, input_ids_length=1): """beam search state update fn.""" # 1. Forward current tokens # Collect the current position slice along length to feed the fast # autoregressive decoder model. Flatten the beam dimension into batch # dimension for feeding into the model. # unflatten beam dimension # Unflatten beam dimension in attention cache arrays input_token = flatten_beam_dim( lax.dynamic_slice( state.running_sequences, (0, 0, state.cur_len - input_ids_length), (batch_size, num_beams, input_ids_length), ) ) model_outputs = model(input_token, params=params, **state.model_kwargs) logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams) cache = jax.tree_util.tree_map( lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values ) # adapt logits for FlaxMarianMTModel logits = self._adapt_logits_for_beam_search(logits) # 2. Compute log probs # get log probabilities from logits, # process logits with processors (*e.g.* min_length, ...), and # add new logprobs to existing running logprobs scores. log_probs = jax.nn.log_softmax(logits) log_probs = logits_processor( flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), state.cur_len ) log_probs = unflatten_beam_dim(log_probs, batch_size, num_beams) log_probs = log_probs + jnp.expand_dims(state.running_scores, axis=2) vocab_size = log_probs.shape[2] log_probs = log_probs.reshape((batch_size, num_beams * vocab_size)) # 3. Retrieve top-K # Each item in batch has num_beams * vocab_size candidate sequences. # For each item, get the top 2*k candidates with the highest log- # probabilities. We gather the top 2*K beams here so that even if the best # K sequences reach EOS simultaneously, we have another K sequences # remaining to continue the live beam search. # Gather the top 2*K scores from _all_ beams. # Gather 2*k top beams. # Recover the beam index by floor division. # Recover token id by modulo division and expand Id array for broadcasting. # Update sequences for the 2*K top-k new sequences. beams_to_keep = 2 * num_beams topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep) topk_beam_indices = topk_indices // vocab_size topk_running_sequences = gather_beams( state.running_sequences, topk_beam_indices, batch_size, beams_to_keep ) topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2) topk_sequences = lax.dynamic_update_slice(topk_running_sequences, topk_ids, (0, 0, state.cur_len)) # 4. 
Check which sequences have ended # Update current sequences: # Did any of these sequences reach an end marker? # To prevent these just finished sequences from being added to the current sequences # set of active beam search sequences, set their log probs to a very large # negative value. did_topk_just_finished = topk_sequences[:, :, state.cur_len] == eos_token_id running_topk_log_probs = topk_log_probs + did_topk_just_finished * np.array(-1.0e7) # 5. Get running sequences scores for next # Determine the top k beam indices (from top 2*k beams) from log probs # and gather top k beams (from top 2*k beams). next_topk_indices = lax.top_k(running_topk_log_probs, k=num_beams)[1] next_running_sequences, next_running_scores = gather_beams( [topk_sequences, running_topk_log_probs], next_topk_indices, batch_size, num_beams ) # 6. Process topk logits # Further process log probs: # - add length penalty # - make sure no scores can be added anymore if beam is full # - make sure still running sequences cannot be chosen as finalized beam topk_log_probs = topk_log_probs / (state.cur_len**length_penalty) beams_in_batch_are_full = jnp.broadcast_to( state.is_sent_finished.all(axis=-1, keepdims=True), did_topk_just_finished.shape ) & (early_stopping is True) add_penalty = ~did_topk_just_finished | beams_in_batch_are_full topk_log_probs += add_penalty * np.array(-1.0e7) # 7. Get scores, sequences, is sentence finished for next. # Combine sequences, scores, and flags along the beam dimension and compare # new finished sequence scores to existing finished scores and select the # best from the new set of beams merged_sequences = jnp.concatenate([state.sequences, topk_sequences], axis=1) merged_scores = jnp.concatenate([state.scores, topk_log_probs], axis=1) merged_is_sent_finished = jnp.concatenate([state.is_sent_finished, did_topk_just_finished], axis=1) topk_merged_indices = lax.top_k(merged_scores, k=num_beams)[1] next_sequences, next_scores, next_is_sent_finished = gather_beams( [merged_sequences, merged_scores, merged_is_sent_finished], topk_merged_indices, batch_size, num_beams ) # 8. Update model kwargs. # Determine the top k beam indices from the original set of all beams. # With these, gather the top k beam-associated caches. next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams) next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams) model_outputs["past_key_values"] = jax.tree_util.tree_map(lambda x: flatten_beam_dim(x), next_cache) next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) return BeamSearchState( cur_len=state.cur_len + 1, running_scores=next_running_scores, running_sequences=next_running_sequences, scores=next_scores, sequences=next_sequences, is_sent_finished=next_is_sent_finished, model_kwargs=next_model_kwargs, ) # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU if input_ids.shape[-1] > 1: state = partial(beam_search_body_fn, input_ids_length=input_ids.shape[-1])(state) if not trace: state = self._run_loop_in_debug(beam_search_cond_fn, beam_search_body_fn, state) else: state = lax.while_loop(beam_search_cond_fn, beam_search_body_fn, state) # Account for the edge-case where there are no finished sequences for a # particular batch item. If so, return running sequences for that batch item. 
none_finished = jnp.any(state.is_sent_finished, axis=1) sequences = jnp.where(none_finished[:, None, None], state.sequences, state.running_sequences) scores = jnp.where(none_finished[:, None], state.scores, state.running_scores) # Take best beams for each batch (the score is sorted in descending order) sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :]) scores = flatten_beam_dim(scores[:, :num_return_sequences]) return FlaxBeamSearchOutput(sequences=sequences, scores=scores)
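
# ---------------------------------------------------------------------------
# Illustrative usage sketch appended for documentation purposes; it is not part
# of the original module. It assumes the "t5-small" checkpoint (which ships Flax
# weights) is available; any Flax seq2seq or causal LM checkpoint works the same
# way. The three calls below exercise the greedy, sampling, and beam-search
# branches that `generate` dispatches to, based on `num_beams` and `do_sample`.
if __name__ == "__main__":
    import jax
    from transformers import AutoTokenizer, FlaxAutoModelForSeq2SeqLM

    tokenizer = AutoTokenizer.from_pretrained("t5-small")
    model = FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-small")
    inputs = tokenizer("translate English to German: How are you?", return_tensors="np")

    # Greedy search: num_beams=1, do_sample=False (the defaults).
    greedy = model.generate(inputs["input_ids"], max_new_tokens=20)

    # Multinomial sampling: num_beams=1, do_sample=True; the PRNG key makes the run reproducible.
    sampled = model.generate(
        inputs["input_ids"],
        do_sample=True,
        top_k=50,
        max_new_tokens=20,
        prng_key=jax.random.PRNGKey(0),
    )

    # Beam search: num_beams>1, do_sample=False; the output also carries per-sequence scores.
    beams = model.generate(inputs["input_ids"], num_beams=4, max_new_tokens=20)

    print(tokenizer.batch_decode(greedy.sequences, skip_special_tokens=True))
    print(tokenizer.batch_decode(sampled.sequences, skip_special_tokens=True))
    print(tokenizer.batch_decode(beams.sequences, skip_special_tokens=True))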
49096
47.659068
119
py
transformers
transformers-main/src/transformers/generation/tf_utils.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import warnings from dataclasses import dataclass from typing import Any, Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice from ..modeling_tf_outputs import TFCausalLMOutputWithPast, TFSeq2SeqLMOutput from ..models.auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, ) from ..tf_utils import shape_list, stable_softmax from ..utils import ModelOutput, logging from .configuration_utils import GenerationConfig from .tf_logits_process import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) logger = logging.get_logger(__name__) @dataclass class TFGreedySearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using greedy search. Args: sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. 
""" sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFGreedySearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFSampleDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using sampling. Args: sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`. attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(num_return_sequences*batch_size, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFSampleEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size*num_return_sequences, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)`. 
cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_return_sequences, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFBeamSearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using beam search. Args: sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`. beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam indices of generated token id at each generation step. `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`. attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. """ sequences: tf.Tensor = None sequences_scores: Optional[tf.Tensor] = None scores: Optional[Tuple[tf.Tensor]] = None beam_indices: Optional[tf.Tensor] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFBeamSearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using beam search. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam. `Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam indices of generated token id at each generation step. `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. 
""" sequences: tf.Tensor = None sequences_scores: Optional[tf.Tensor] = None scores: Optional[Tuple[tf.Tensor]] = None beam_indices: Optional[tf.Tensor] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFBeamSampleDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using beam sample. Args: sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. sequences_scores (`tf.Tensor` of shape `(batch_size * num_return_sequence)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`. beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam indices of generated token id at each generation step. `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`. attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`. """ sequences: tf.Tensor = None sequences_scores: Optional[tf.Tensor] = None scores: Optional[Tuple[tf.Tensor]] = None beam_indices: Optional[tf.Tensor] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFBeamSampleEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`tf.Tensor` of shape `(batch_size*num_beams, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
sequences_scores (`tf.Tensor` of shape `(batch_size * num_return_sequence)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam indices of generated token id at each generation step. `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size*num_beams, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`. """ sequences: tf.Tensor = None sequences_scores: Optional[tf.Tensor] = None scores: Optional[Tuple[tf.Tensor]] = None beam_indices: Optional[tf.Tensor] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFContrastiveSearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using contrastive search. Args: sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFContrastiveSearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using contrastive search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. 
decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None TFGreedySearchOutput = Union[TFGreedySearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput] TFSampleOutput = Union[TFSampleEncoderDecoderOutput, TFSampleDecoderOnlyOutput] TFBeamSearchOutput = Union[TFBeamSearchEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput] TFBeamSampleOutput = Union[TFBeamSampleEncoderDecoderOutput, TFBeamSampleDecoderOnlyOutput] TFContrastiveSearchOutput = Union[TFContrastiveSearchEncoderDecoderOutput, TFContrastiveSearchDecoderOnlyOutput] TFGenerateOutput = Union[ TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, TFContrastiveSearchOutput ] class TFGenerationMixin: """ A class containing all of the functions supporting generation, to be used as a mixin in [`TFPreTrainedModel`]. The class exposes [`~generation.TFGenerationMixin.generate`], which can be used for: - *greedy decoding* by calling [`~generation.TFGenerationMixin.greedy_search`] if `num_beams=1` and `do_sample=False` - *contrastive search* by calling [`~generation.TFGenerationMixin.contrastive_search`] if `penalty_alpha>0` and `top_k>1` - *multinomial sampling* by calling [`~generation.TFGenerationMixin.sample`] if `num_beams=1` and `do_sample=True` - *beam-search decoding* by calling [`~generation.TFGenerationMixin.beam_search`] if `num_beams>1` You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). """ _seed_generator = None @property def seed_generator(self): warnings.warn("`seed_generator` is deprecated and will be removed in a future version.", UserWarning) if self._seed_generator is None: self._seed_generator = tf.random.Generator.from_non_deterministic_state() return self._seed_generator supports_xla_generation = True def prepare_inputs_for_generation(self, *args, **kwargs): raise NotImplementedError( "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`." ) def adjust_logits_during_generation( self, logits, cur_len, max_length, forced_bos_token_id, forced_eos_token_id, **kwargs ): """ Implement in subclasses of [`PreTrainedModel`] for custom behavior to adjust the logits in the generate method. 
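The default implementation below leaves the logits unchanged unless `forced_bos_token_id` or `forced_eos_token_id` is set, in which case every other vocabulary token is masked with a large negative value (-1e8) at the first and last generation step, respectively, so that the forced token is the one selected.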
""" vocab_size = getattr(self.config, "vocab_size", None) if vocab_size is None and self.config.is_encoder_decoder: decoder_config = getattr(self.config, "decoder", None) if decoder_config is not None: vocab_size = getattr(self.config.decoder, "vocab_size", None) if cur_len == 1 and forced_bos_token_id is not None: vocab_range = tf.constant(range(vocab_size)) return tf.where(vocab_range != forced_bos_token_id, -1e8, logits) elif cur_len == max_length - 1 and forced_eos_token_id is not None: vocab_range = tf.constant(range(vocab_size)) return tf.where(vocab_range != forced_eos_token_id, -1e8, logits) else: return logits def compute_transition_scores( self, sequences: tf.Tensor, scores: Tuple[tf.Tensor], beam_indices: Optional[tf.Tensor] = None, normalize_logits: bool = False, ) -> tf.Tensor: """ Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was used). This is a convenient method to quicky obtain the scores of the selected tokens at generation time. Parameters: sequences (`tf.Tensor`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(tf.Tensor)`): Transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. beam_indices (`tf.Tensor`, *optional*): Beam indices of generated token id at each generation step. `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`. Only required if a `num_beams>1` at generate-time. normalize_logits (`bool`, *optional*, defaults to `False`): Whether to normalize the logits (which, for legacy reasons, may be unnormalized). Return: `tf.Tensor`: A `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing the transition scores (logits) Examples: ```python >>> from transformers import GPT2Tokenizer, TFAutoModelForCausalLM >>> import numpy as np >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2") >>> model = TFAutoModelForCausalLM.from_pretrained("gpt2") >>> tokenizer.pad_token_id = tokenizer.eos_token_id >>> inputs = tokenizer(["Today is"], return_tensors="tf") >>> # Example 1: Print the scores for each token generated with Greedy Search >>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True) >>> transition_scores = model.compute_transition_scores( ... outputs.sequences, outputs.scores, normalize_logits=True ... ) >>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for >>> # encoder-decoder models, like BART or T5. >>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1] >>> generated_tokens = outputs.sequences[:, input_length:] >>> for tok, score in zip(generated_tokens[0], transition_scores[0]): ... # | token | token string | logits | probability ... print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}") | 262 | the | -1.413 | 24.33% | 1110 | day | -2.609 | 7.36% | 618 | when | -2.009 | 13.41% | 356 | we | -1.859 | 15.58% | 460 | can | -2.508 | 8.14% >>> # Example 2: Reconstruct the sequence scores from Beam Search >>> outputs = model.generate( ... **inputs, ... 
max_new_tokens=5, ... num_beams=4, ... num_return_sequences=4, ... return_dict_in_generate=True, ... output_scores=True, ... ) >>> transition_scores = model.compute_transition_scores( ... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False ... ) >>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores. >>> # Tip: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the >>> # use case, you might want to recompute it with `normalize_logits=True`. >>> output_length = input_length + np.sum(transition_scores.numpy() < 0, axis=1) >>> length_penalty = model.generation_config.length_penalty >>> reconstructed_scores = np.sum(transition_scores, axis=1) / (output_length**length_penalty) >>> print(np.allclose(outputs.sequences_scores, reconstructed_scores)) True ```""" # 1. In absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent # to a beam search approach were the first (and only) beam is always selected if beam_indices is None: beam_indices = tf.tile(tf.expand_dims(tf.range(scores[0].shape[0]), axis=1), [1, len(scores)]) # 2. reshape scores as [batch_size, vocab_size, # generation steps] with # generation steps being # seq_len - input_length scores = tf.transpose(tf.reshape(tf.stack(scores), (len(scores), -1)), (1, 0)) scores = tf.reshape(scores, (-1, self.config.vocab_size, scores.shape[-1])) # 3. Optionally normalize the logits (across the vocab dimension) if normalize_logits: scores = tf.nn.log_softmax(scores, axis=1) # 4. cut beam_indices to longest beam length beam_indices_mask = beam_indices < 0 max_beam_length = tf.math.reduce_max( tf.math.reduce_sum((1 - tf.cast(beam_indices_mask, dtype=tf.int32)), axis=-1) ) beam_indices = beam_indices[:, -max_beam_length:] beam_indices_mask = beam_indices_mask[:, -max_beam_length:] # 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards beam_indices = tf.where(beam_indices_mask, 0, beam_indices) # 6. Define which indices contributed to scores cut_idx = sequences.shape[-1] - max_beam_length token_indices = sequences[:, cut_idx:] gen_step_idx = tf.broadcast_to(tf.range(scores.shape[-1]), token_indices.shape) indices = tf.stack([beam_indices, token_indices, gen_step_idx], axis=-1) # 7. Compute scores transition_scores = tf.gather_nd(scores, indices) # 8. Mask out transition_scores of beams that stopped early transition_scores = tf.where(beam_indices_mask, 0, transition_scores) return transition_scores def _validate_model_class(self): """ Confirms that the model class is compatible with generation. If not, raises an exception that points to the right class to use. """ if not self.can_generate(): generate_compatible_mappings = [ TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, ] generate_compatible_classes = set() for model_mapping in generate_compatible_mappings: supported_models = model_mapping.get(type(self.config), default=None) if supported_models is not None: generate_compatible_classes.add(supported_models.__name__) exception_message = ( f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " "it doesn't have a language model head." 
) if generate_compatible_classes: exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" raise TypeError(exception_message) def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): """Validates model kwargs for generation. Generate argument typos will also be caught here.""" # Excludes arguments that are handled before calling any model function if self.config.is_encoder_decoder: for key in ["decoder_input_ids"]: model_kwargs.pop(key, None) unused_model_args = [] model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;) if "kwargs" in model_args or "model_kwargs" in model_args: model_args |= set(inspect.signature(self.call).parameters) for key, value in model_kwargs.items(): if value is not None and key not in model_args: unused_model_args.append(key) if unused_model_args: raise ValueError( f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" " generate arguments will also show up in this list)" ) def generate( self, inputs: Optional[tf.Tensor] = None, generation_config: Optional[GenerationConfig] = None, logits_processor: Optional[TFLogitsProcessorList] = None, seed=None, **kwargs, ) -> Union[TFGenerateOutput, tf.Tensor]: r""" Generates sequences of token ids for models with a language modeling head. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate, e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](../generation_strategies). </Tip> Parameters: inputs (`tf.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which has the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logits processor is passed that is already created with the arguments or a generation config, an error is thrown. This feature is intended for advanced users. seed (`List[int]`, *optional*): Random seed to control sampling, containing two integers, used when `do_sample` is `True`.
See the `seed` argument from stateless functions in `tf.random`. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `tf.Tensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `tf.Tensor`. If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation.TFGreedySearchDecoderOnlyOutput`], - [`~generation.TFSampleDecoderOnlyOutput`], - [`~generation.TFBeamSearchDecoderOnlyOutput`], - [`~generation.TFBeamSampleDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.TFGreedySearchEncoderDecoderOutput`], - [`~generation.TFSampleEncoderDecoderOutput`], - [`~generation.TFBeamSearchEncoderDecoderOutput`], - [`~generation.TFBeamSampleEncoderDecoderOutput`] """ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call self._validate_model_class() # priority: `generation_config` argument > `model.generation_config` (the default generation config) if generation_config is None: # legacy: users may modify the model configuration to control generation -- update the generation config # model attribute accordingly, if it was created from the model config if self.generation_config._from_model_config: new_generation_config = GenerationConfig.from_model_config(self.config) if new_generation_config != self.generation_config: warnings.warn( "You have modified the pretrained model configuration to control generation. This is a" " deprecated strategy to control generation and will be removed soon, in a future version." " Please use a generation configuration file (see" " https://huggingface.co/docs/transformers/main_classes/text_generation )" ) self.generation_config = new_generation_config generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs generation_config.validate() self._validate_model_kwargs(model_kwargs.copy()) # 2. Cast input dtypes to tf.int32 unless they're floats (which happens for some image models) if inputs is not None: if isinstance(inputs, tf.Tensor) and inputs.dtype.is_floating: pass elif isinstance(inputs, np.ndarray) and np.issubdtype(inputs.dtype, np.floating): pass else: inputs = tf.cast(inputs, tf.int32) if model_kwargs.get("attention_mask") is not None: model_kwargs["attention_mask"] = tf.cast(model_kwargs["attention_mask"], tf.int32) if "decoder_input_ids" in model_kwargs: if ( isinstance(model_kwargs["decoder_input_ids"], tf.Tensor) and model_kwargs["decoder_input_ids"].dtype.is_floating ): pass elif isinstance(model_kwargs["decoder_input_ids"], np.ndarray) and np.issubdtype( model_kwargs["decoder_input_ids"].dtype, np.floating ): pass else: model_kwargs["decoder_input_ids"] = tf.cast(model_kwargs["decoder_input_ids"], tf.int32) # 3. 
Set generation parameters if not already defined logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() if generation_config.pad_token_id is None and generation_config.eos_token_id is not None: if model_kwargs.get("attention_mask") is None: logger.warning( "The attention mask and the pad token id were not set. As a consequence, you may observe " "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." ) eos_token_id = generation_config.eos_token_id if isinstance(eos_token_id, list): eos_token_id = eos_token_id[0] logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") generation_config.pad_token_id = eos_token_id use_xla = not tf.executing_eagerly() if use_xla and not self.supports_xla_generation: raise ValueError( "The selected model does not support Graph mode nor XLA generation (e.g. from tf.function())" ) # 4. Define model inputs inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs( inputs, generation_config.bos_token_id, model_kwargs ) # inputs_ids now has to be defined and cannot be None anymore batch_size = shape_list(inputs_tensor)[0] # 5. Prepare other model kwargs model_kwargs["output_attentions"] = generation_config.output_attentions model_kwargs["output_hidden_states"] = generation_config.output_hidden_states model_kwargs["use_cache"] = generation_config.use_cache accepts_attention_mask = "attention_mask" in set(inspect.signature(self.call).parameters.keys()) requires_attention_mask = "encoder_outputs" not in model_kwargs if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask: model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id ) # decoder-only models should use left-padding for generation if not self.config.is_encoder_decoder: if generation_config.pad_token_id is not None and tf.math.reduce_any( inputs_tensor[:, -1] == generation_config.pad_token_id ): logger.warning( "A decoder-only architecture is being used, but right-padding was detected! For correct " "generation results, please set `padding_side='left'` when initializing the tokenizer." ) if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs: # if model is encoder decoder encoder_outputs are created and added to `model_kwargs` model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( inputs_tensor, model_kwargs, model_input_name ) # 6. Prepare model inputs which will be used for auto-regressive generation if self.config.is_encoder_decoder: input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation( batch_size=batch_size, model_input_name=model_input_name, model_kwargs=model_kwargs, decoder_start_token_id=generation_config.decoder_start_token_id, bos_token_id=generation_config.bos_token_id, ) else: input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids") # 7. Prepare `max_length` depending on other stopping criteria. input_ids_seq_length = shape_list(input_ids)[-1] has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None if has_default_max_length and generation_config.max_new_tokens is None: warnings.warn( f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. 
" "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we" " recommend using `max_new_tokens` to control the maximum length of the generation.", UserWarning, ) elif generation_config.max_new_tokens is not None: if not has_default_max_length: logger.warning( f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " "Please refer to the documentation for more information. " "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" ) generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length # If the input length is a tensor (i.e. dynamic length), skip length checks if not isinstance(input_ids_seq_length, tf.Tensor): if ( generation_config.min_length is not None and generation_config.min_length > generation_config.max_length ): raise ValueError( f"Unfeasable length constraints: the minimum length ({generation_config.min_length}) is larger" f" than the maximum length ({generation_config.max_length})" ) if input_ids_seq_length >= generation_config.max_length: input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" logger.warning( f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" " increasing`max_new_tokens`." ) # 8. determine generation mode is_contrastive_search_gen_mode = ( generation_config.top_k is not None and generation_config.top_k > 1 and generation_config.do_sample is False and generation_config.penalty_alpha is not None and generation_config.penalty_alpha > 0 ) is_greedy_gen_mode = ( not is_contrastive_search_gen_mode and (generation_config.num_beams == 1) and generation_config.do_sample is False ) is_beam_gen_mode = ( not is_contrastive_search_gen_mode and (generation_config.num_beams > 1) and generation_config.do_sample is False ) is_sample_gen_mode = (generation_config.num_beams == 1) and generation_config.do_sample is True is_beam_sample_gen_mode = (generation_config.num_beams > 1) and generation_config.do_sample is True # 9. prepare distribution pre_processing samplers logits_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=input_ids_seq_length, logits_processor=logits_processor, ) # 10. go into different generation modes if is_greedy_gen_mode: if generation_config.num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing" " greedy search." ) # 11. run greedy search return self.greedy_search( input_ids, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, logits_processor=logits_processor, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, **model_kwargs, ) elif is_contrastive_search_gen_mode: if generation_config.num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing" " contrastive search." ) # 11. 
run contrastive search return self.contrastive_search( input_ids, top_k=generation_config.top_k, penalty_alpha=generation_config.penalty_alpha, logits_processor=logits_processor, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, **model_kwargs, ) elif is_sample_gen_mode: # 11. prepare logits warper logits_warper = self._get_logits_warper(generation_config=generation_config) # 12. expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 13. run sample return self.sample( input_ids, logits_processor=logits_processor, logits_warper=logits_warper, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, seed=seed, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, **model_kwargs, ) elif is_beam_gen_mode: if generation_config.num_beams < generation_config.num_return_sequences: raise ValueError( "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >=" f" num_return_sequences, got {generation_config.num_beams} and" f" {generation_config.num_return_sequences} (respectively)" ) # 11. broadcast inputs to the desired number of beams input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_beams, is_encoder_decoder=self.config.is_encoder_decoder, expand_in_new_axis=True, **model_kwargs, ) # 12. run beam search return self.beam_search( input_ids, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, length_penalty=generation_config.length_penalty, early_stopping=generation_config.early_stopping, logits_processor=logits_processor, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, num_return_sequences=generation_config.num_return_sequences, **model_kwargs, ) elif is_beam_sample_gen_mode: if generation_config.num_beams < generation_config.num_return_sequences: raise ValueError( "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >=" f" num_return_sequences, got {generation_config.num_beams} and" f" {generation_config.num_return_sequences} (respectively)" ) # 11. prepare logits warper logits_warper = self._get_logits_warper(generation_config=generation_config) # 12. broadcast inputs to the desired number of beams input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_beams, is_encoder_decoder=self.config.is_encoder_decoder, expand_in_new_axis=True, **model_kwargs, ) # 13.
run beam sample (beam search with sampling) return self.beam_search( input_ids, do_sample=True, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, length_penalty=generation_config.length_penalty, early_stopping=generation_config.early_stopping, logits_processor=logits_processor, logits_warper=logits_warper, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, num_return_sequences=generation_config.num_return_sequences, **model_kwargs, ) def _prepare_attention_mask_for_generation( self, inputs: tf.Tensor, pad_token_id: Optional[int], eos_token_id: Optional[int], ) -> tf.Tensor: is_input_ids = len(inputs.shape) == 2 and inputs.dtype in (tf.int32, tf.int64) is_pad_token_in_inputs = (pad_token_id is not None) and tf.math.reduce_any(inputs == pad_token_id) is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id) # Check if input is input_ids and padded -> only then is attention_mask defined if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: return tf.cast(tf.math.not_equal(inputs, pad_token_id), dtype=tf.int32) else: return tf.ones(inputs.shape[:2], dtype=tf.int32) def _prepare_encoder_decoder_kwargs_for_generation( self, inputs_tensor: tf.Tensor, model_kwargs, model_input_name: Optional[str] = None ) -> Dict[str, Any]: # 1. get encoder and store encoder outputs encoder = self.get_encoder() # 2. prepare encoder args and encoder kwargs from model kwargs irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not any(argument.startswith(p) for p in irrelevant_prefix) } encoder_signature = set(inspect.signature(encoder.call).parameters) encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature if not encoder_accepts_wildcard: encoder_kwargs = { argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature } # 3. vision models don't use `attention_mask`. encoder_kwargs["return_dict"] = True encoder_kwargs[model_input_name] = inputs_tensor if model_input_name != self.main_input_name: # in Keras, the first input must always be passed encoder_kwargs[self.main_input_name] = None encoder_outputs = encoder(**encoder_kwargs) model_kwargs["encoder_outputs"] = encoder_outputs return model_kwargs def _prepare_decoder_input_ids_for_generation( self, batch_size: int, model_input_name: str, model_kwargs: Dict[str, tf.Tensor], decoder_start_token_id: int = None, bos_token_id: int = None, ) -> Tuple[tf.Tensor, Dict[str, tf.Tensor]]: """Prepares `decoder_input_ids` for generation with encoder-decoder models""" # 1. Check whether the user has defined `decoder_input_ids` manually. To facilitate in terms of input naming, # we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input. if model_kwargs is not None and "decoder_input_ids" in model_kwargs: decoder_input_ids = model_kwargs.pop("decoder_input_ids") elif "input_ids" in model_kwargs and model_input_name != "input_ids": decoder_input_ids = model_kwargs.pop("input_ids") else: decoder_input_ids = None # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that. 
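# (illustrative note) e.g. with decoder_start_token_id=2, a user-provided decoder_input_ids of [[5, 6]] becomes [[2, 5, 6]] below, while without user input every batch entry is initialized to [[2]].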
decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) decoder_input_ids_start = tf.ones((batch_size, 1), dtype=tf.int32) * decoder_start_token_id # no user input -> use decoder_start_token_id as decoder_input_ids if decoder_input_ids is None: decoder_input_ids = decoder_input_ids_start # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust # decoder_attention_mask if provided) elif tf.reduce_all(decoder_input_ids[:, 0] != decoder_start_token_id): decoder_input_ids = tf.concat([decoder_input_ids_start, decoder_input_ids], axis=-1) if "decoder_attention_mask" in model_kwargs: decoder_attention_mask = model_kwargs["decoder_attention_mask"] decoder_attention_mask = tf.concat( (tf.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask), axis=-1, ) model_kwargs["decoder_attention_mask"] = decoder_attention_mask return decoder_input_ids, model_kwargs def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: # retrieve decoder_start_token_id for encoder-decoder models # fall back to bos_token_id if necessary decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.generation_config.decoder_start_token_id ) bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif bos_token_id is not None: return bos_token_id raise ValueError( "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." ) @staticmethod def _expand_inputs_for_generation( expand_size: int = 1, is_encoder_decoder: bool = False, input_ids: Optional[tf.Tensor] = None, expand_in_new_axis: bool = False, **model_kwargs, ) -> Tuple[tf.Tensor, Dict[str, Any]]: """ Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...] or [batch_size, expand_size, ...], depending on `expand_in_new_axis`. Beam-based approaches expect this function to be used with `expand_in_new_axis=True` """ def _expand_tensor(tensor: tf.Tensor): if expand_in_new_axis: shape = shape_list(tensor) return tf.broadcast_to(tensor[:, None], (shape[0], expand_size) + tuple(shape[1:])) else: return tf.repeat(tensor, expand_size, axis=0) def _expand_dict_for_generation(dict_to_expand): for key in dict_to_expand: if dict_to_expand[key] is not None and isinstance(dict_to_expand[key], tf.Tensor): dict_to_expand[key] = _expand_tensor(dict_to_expand[key]) return dict_to_expand if input_ids is not None: input_ids = _expand_tensor(input_ids) model_kwargs = _expand_dict_for_generation(model_kwargs) if is_encoder_decoder: if model_kwargs.get("encoder_outputs") is None: raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"]) return input_ids, model_kwargs def _prepare_model_inputs( self, inputs: Optional[tf.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[Dict[str, tf.Tensor]] = None, ) -> Tuple[tf.Tensor, Optional[str], Dict[str, tf.Tensor]]: """ This function extracts the model-specific `inputs` for generation. """ # 1. retrieve all kwargs that are non-None or non-model input related. 
# some encoder-decoder models have different names for model and encoder if ( self.config.is_encoder_decoder and hasattr(self, "encoder") and hasattr(self.encoder, "main_input_name") and self.encoder.main_input_name != self.main_input_name ): input_name = self.encoder.main_input_name else: input_name = self.main_input_name model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name} # 2. check whether model_input_name is passed as kwarg # if yes and `inputs` is None use kwarg inputs inputs_kwarg = model_kwargs.pop(input_name, None) if inputs_kwarg is not None and inputs is not None: raise ValueError( f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed." f"Make sure to either pass {inputs} or {input_name}=..." ) elif inputs_kwarg is not None: inputs = inputs_kwarg # 3. In the presence of `inputs_embeds` for text models: # - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model # doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with # input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`) # - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and # pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states. if input_name == "input_ids" and "inputs_embeds" in model_kwargs: if not self.config.is_encoder_decoder: has_inputs_embeds_forwarding = "inputs_embeds" in set( inspect.signature(self.prepare_inputs_for_generation).parameters.keys() ) if not has_inputs_embeds_forwarding: raise ValueError( f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} " "doesn't have its forwarding implemented. See the GPT2 implementation for an example " "(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!" ) # In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of # the attention mask) can rely on the actual model input. model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation( inputs, bos_token_id, model_kwargs=model_kwargs ) else: if inputs is not None: raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.") inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds" # 4. if `inputs` is still None, try to create `input_ids` from BOS token inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs) return inputs, input_name, model_kwargs def _maybe_initialize_input_ids_for_generation( self, inputs: Optional[tf.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[Dict[str, tf.Tensor]] = None, ) -> tf.Tensor: """Initializes input ids for generation, if necessary.""" if inputs is not None: return inputs encoder_outputs = model_kwargs.get("encoder_outputs") if self.config.is_encoder_decoder and encoder_outputs is not None: # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding shape = encoder_outputs.last_hidden_state.shape[:-1] return tf.ones(shape, dtype=tf.int32) * -100 if bos_token_id is None: raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") # If there is some tensor in `model_kwargs`, we can infer the batch size from it. 
This is helpful with # soft-prompting or in multimodal implementations built on top of decoder-only language models. batch_size = 1 for value in model_kwargs.values(): if isinstance(value, tf.Tensor): batch_size = value.shape[0] break return tf.ones((batch_size, 1), dtype=tf.int32) * bos_token_id @staticmethod def _extract_past_from_model_output(outputs: ModelOutput): past_key_values = None if "past_key_values" in outputs: past_key_values = outputs.past_key_values elif "mems" in outputs: past_key_values = outputs.mems elif "past_buckets_states" in outputs: past_key_values = outputs.past_buckets_states return past_key_values def _update_model_kwargs_for_generation( self, outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False ) -> Dict[str, Any]: # update past_key_values model_kwargs["past_key_values"] = self._extract_past_from_model_output(outputs) # update attention mask if not is_encoder_decoder: if "attention_mask" in model_kwargs: attention_mask = model_kwargs["attention_mask"] model_kwargs["attention_mask"] = tf.concat( [attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1 ) return model_kwargs def _update_model_kwargs_for_xla_generation( self, model_outputs: ModelOutput, model_kwargs: Dict[str, Any], cur_len: int, max_length: int, batch_size: int, is_encoder_decoder: bool = False, batch_axis: int = 0, ): def _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder): """initializes the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`""" if is_encoder_decoder: # One 1 for decoder_start_token_id, 0s for the currently-unfilled locations in the past_key_values tensor, # 1s for the actual input_ids decoder_attention_mask = tf.concat( [ tf.ones((batch_size, 1), dtype=tf.int32), tf.zeros((batch_size, num_padding_values), dtype=tf.int32), tf.ones((batch_size, 1), dtype=tf.int32), ], axis=1, ) mask = {"decoder_attention_mask": decoder_attention_mask} else: attention_mask = model_kwargs.pop("attention_mask") # 0s for the currently-unfilled locations in the past_key_values tensor, 1s for the actual input_ids attention_mask = tf.concat( [ attention_mask, tf.zeros((batch_size, num_padding_values), dtype=attention_mask.dtype), tf.ones((batch_size, 1), dtype=attention_mask.dtype), ], axis=1, ) mask = {"attention_mask": attention_mask} return mask def _update_attention(model_kwargs, new_past_index, is_encoder_decoder): """updates the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`""" update_start = tf.constant([0, 1], dtype=tf.int32) * new_past_index if is_encoder_decoder: decoder_attention_mask = model_kwargs.pop("decoder_attention_mask") decoder_attention_mask_update_slice = tf.ones((batch_size, 1), dtype=decoder_attention_mask.dtype) decoder_attention_mask = dynamic_update_slice( decoder_attention_mask, decoder_attention_mask_update_slice, update_start ) mask = {"decoder_attention_mask": decoder_attention_mask} else: attention_mask = model_kwargs.pop("attention_mask") attention_mask_update_slice = tf.ones((batch_size, 1), dtype=attention_mask.dtype) attention_mask = dynamic_update_slice(attention_mask, attention_mask_update_slice, update_start) mask = {"attention_mask": attention_mask} return mask def _initialize_past(past_key_values, num_padding_values, batch_axis): """initialize past_key_values with zeros -- the structure depends on `batch_axis`""" if batch_axis == 0: padding_values = tf.constant([[0, 0], [0, 0], [0, num_padding_values], [0, 0]], 
dtype=tf.int32) new_past = () for past_layer in past_key_values: new_past_layer = list(past_layer) for i in range(len(new_past_layer[:2])): new_past_layer[i] = tf.pad(past_layer[i], padding_values) new_past += (tuple(new_past_layer),) else: padding_values = tf.scatter_nd(indices=[[3, 1]], updates=[num_padding_values], shape=(5, 2)) new_past = list(past_key_values) for i in range(len(past_key_values)): new_past[i] = tf.pad(past_key_values[i], padding_values) return new_past def _update_past(past_key_values, new_past_index, batch_axis): if batch_axis == 0: slice_start_base = tf.constant([0, 0, 1, 0]) new_past = () for past_layer in past_key_values: new_past_layer = list(past_layer) for i in range(len(new_past_layer[:2])): update_slice = past_layer[i][:, :, -1:] # Write the last slice to the first open location in the padded past_key_values array # and then truncate the last slice off the array new_past_layer[i] = dynamic_update_slice( past_layer[i][:, :, :-1], update_slice, slice_start_base * new_past_index ) new_past += (tuple(new_past_layer),) else: slice_start_base = tf.constant([0, 0, 0, 1, 0]) new_past = [None for _ in range(len(past_key_values))] for i in range(len(past_key_values)): update_slice = past_key_values[i][:, :, :, -1:] # Write the last slice to the first open location in the padded past_key_values array # and then truncate the last slice off the array new_past[i] = dynamic_update_slice( past_key_values[i][:, :, :, :-1], update_slice, slice_start_base * new_past_index ) return new_past past_key_values = self._extract_past_from_model_output(model_outputs) if past_key_values is None: raise ValueError( "No known `past_key_values variable` found in model outputs (model outputs keys:" f" {list(model_outputs.keys())})" ) is_past_initialized = model_kwargs.pop("past_key_values", None) is not None if not is_past_initialized: # The padded version of `past_key_values` has a length of `max_length - 1`, as `past_key_values` holds information relative to # previous autoregressive generation steps (step 0 has no past_key_values, step 1 has 1 past_key_values value, ..., the last step # has `max_length - 1` past_key_values values). num_padding_values = max_length - cur_len - 1 mask = _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder) new_past = _initialize_past(past_key_values, num_padding_values, batch_axis) else: # The new index of past_key_values to be filled corresponds to the current length of the sequence, with two # subtractions: -1 because past_key_values holds information regarding previous generation steps (read comment above) # and -1 again because in an array the index is the length of the array minus 1. new_past_index = cur_len - 2 mask = _update_attention(model_kwargs, new_past_index, is_encoder_decoder) new_past = _update_past(past_key_values, new_past_index, batch_axis) # sets the updated variables (mask and past_key_values) model_kwargs.update(mask) model_kwargs["past_key_values"] = tuple(new_past) return model_kwargs def _get_logits_warper( self, generation_config: GenerationConfig, ) -> TFLogitsProcessorList: """ This class returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsWarper`] instances used for multinomial sampling. 
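Depending on the values set in `generation_config`, the returned list may contain a [`TFTemperatureLogitsWarper`], a [`TFTopKLogitsWarper`] and a [`TFTopPLogitsWarper`], applied in that order.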
""" # instantiate warpers list warpers = TFLogitsProcessorList() # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if generation_config.temperature is not None and generation_config.temperature != 1.0: warpers.append(TFTemperatureLogitsWarper(generation_config.temperature)) if generation_config.top_k is not None and generation_config.top_k != 0: warpers.append(TFTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1)) if generation_config.top_p is not None and generation_config.top_p < 1.0: warpers.append(TFTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1)) return warpers def _get_logits_processor( self, generation_config: GenerationConfig, input_ids_seq_length: int, logits_processor: Optional[TFLogitsProcessorList], ) -> TFLogitsProcessorList: """ This class returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsProcessor`] instances used to modify the scores of the language model head. """ processors = TFLogitsProcessorList() # instantiate processors list if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0: processors.append(TFRepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty)) if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0: processors.append(TFNoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size)) if generation_config.bad_words_ids is not None: processors.append( TFNoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id) ) if ( generation_config.min_length is not None and generation_config.eos_token_id is not None and generation_config.min_length > 0 ): processors.append(TFMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)) if generation_config.forced_bos_token_id is not None: processors.append(TFForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)) if generation_config.forced_eos_token_id is not None: processors.append( TFForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id) ) if generation_config.suppress_tokens is not None: processors.append(TFSuppressTokensLogitsProcessor(generation_config.suppress_tokens)) if generation_config.begin_suppress_tokens is not None: begin_index = input_ids_seq_length begin_index = ( begin_index if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None) else begin_index + 1 ) if generation_config.forced_decoder_ids is not None: begin_index += generation_config.forced_decoder_ids[-1][ 0 ] # generation starts after the last token that is forced processors.append( TFSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index) ) if generation_config.forced_decoder_ids is not None: processors.append(TFForceTokensLogitsProcessor(generation_config.forced_decoder_ids)) processors = self._merge_criteria_processor_list(processors, logits_processor) return processors def _merge_criteria_processor_list( self, default_list: TFLogitsProcessorList, custom_list: TFLogitsProcessorList, ) -> TFLogitsProcessorList: if len(custom_list) == 0: return default_list for default in default_list: for custom in custom_list: if type(custom) is type(default): object_type = "logits processor" raise ValueError( f"A custom {object_type} of type {type(custom)} with values {custom} 
has been passed to" f" `generate`, but it has already been created with the values {default}. {default} has been" " created by passing the corresponding arguments to generate or by the model's config default" f" values. If you just want to change the default values of {object_type} consider passing" f" them as arguments to `generate` instead of using a custom {object_type}." ) default_list.extend(custom_list) return default_list def greedy_search( self, input_ids: tf.Tensor, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, logits_processor: Optional[TFLogitsProcessorList] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[TFGreedySearchOutput, tf.Tensor]: r""" Generates sequences for models with a language modeling head using greedy decoding. Parameters: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. max_length (`int`, *optional*, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. model_kwargs: Additional model specific keyword arguments will be forwarded to the `call` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation.TFGreedySearchDecoderOnlyOutput`], [`~generation.TFGreedySearchEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a [`~generation.TFGreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.TFGreedySearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... TFAutoModelForCausalLM, ... TFLogitsProcessorList, ... TFMinLengthLogitsProcessor, ... 
) >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = TFAutoModelForCausalLM.from_pretrained("gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id >>> input_prompt = "Today is a beautiful day, and" >>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids >>> # instantiate logits processors >>> logits_processor = TFLogitsProcessorList( ... [ ... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id), ... ] ... ) >>> outputs = model.greedy_search(input_ids, logits_processor=logits_processor) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ["Today is a beautiful day, and I'm so happy to be here. I'm so happy to"] ```""" # 1. init greedy_search values logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache) use_xla = not tf.executing_eagerly() # TODO (Joao): fix cache format or find programatic way to detect cache index # GPT2 and other models has a slightly different cache structure, with a different batch axis model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0 # some models, like XLNet, need more than the last token in the presence of past_key_values needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys()) # 2. init `attentions`, `hidden_states`, and `scores` tuples scores = [] if (return_dict_in_generate and output_scores) else None decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None cross_attentions = [] if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None # 3. init tensors to use for "xla-compileable" generate function batch_size, cur_len = shape_list(input_ids) # initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences` input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0) generated = tf.concat([input_ids, input_ids_padding], axis=-1) finished_sequences = tf.zeros((batch_size,), dtype=tf.bool) # 4. 
define "xla-compile-able" stop-condition and auto-regressive function # define condition fn def greedy_search_cond_fn(generated, finished_sequences, cur_len, model_kwargs): """state termination condition fn.""" return ~tf.reduce_all(finished_sequences) # define condition fn def greedy_search_body_fn(generated, finished_sequences, cur_len, model_kwargs): """state update fn.""" if model_kwargs.get("past_key_values") is None or needs_full_input: input_ids = generated[:, :cur_len] else: input_ids = tf.expand_dims(generated[:, cur_len - 1], -1) model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs) # forward pass to get next token logits model_outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) next_token_logits = model_outputs.logits[:, -1] # pre-process distribution next_tokens_scores = logits_processor(generated, next_token_logits, cur_len) # Store scores, attentions and hidden_states when required if not use_xla and return_dict_in_generate: if output_scores: scores.append(next_tokens_scores) if output_attentions and self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.decoder_attentions) elif output_attentions and not self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.attentions) if self.config.is_encoder_decoder: cross_attentions.append(model_outputs.cross_attentions) if output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.decoder_hidden_states) elif output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.hidden_states) # argmax next_tokens = tf.argmax(next_tokens_scores, axis=-1, output_type=tf.int32) if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) next_token_is_eos = tf.math.reduce_any( tf.equal( tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) ), axis=0, ) finished_sequences = finished_sequences | next_token_is_eos # update `generated` and `cur_len` update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) cur_len += 1 # update model_kwargs if use_xla: model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=model_outputs, model_kwargs=model_kwargs, cur_len=cur_len, max_length=max_length, batch_size=batch_size, is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: model_kwargs = self._update_model_kwargs_for_generation( model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if we don't cache past_key_values key values we need the whole input if model_kwargs.get("past_key_values", None) is None: # let's throw out `past_key_values` since we don't want `None` tensors model_kwargs.pop("past_key_values", None) return generated, finished_sequences, cur_len, model_kwargs # 5. 
run generation # 1st generation step has to be run before to initialize `past_key_values` generated, finished_sequences, cur_len, model_kwargs = greedy_search_body_fn( generated, finished_sequences, cur_len, model_kwargs ) # 2-to-n generation steps can then be run in autoregressive fashion # only in case 1st generation step does NOT yield EOS token though maximum_iterations = max_length - cur_len generated, _, cur_len, _ = tf.while_loop( greedy_search_cond_fn, greedy_search_body_fn, (generated, finished_sequences, cur_len, model_kwargs), maximum_iterations=maximum_iterations, ) # 6. prepare outputs if not use_xla: # cut for backward compatibility generated = generated[:, :cur_len] if return_dict_in_generate: if self.config.is_encoder_decoder: # if model is an encoder-decoder, retrieve encoder attention weights # and hidden states encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) scores = tuple(scores) if scores is not None else None decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None return TFGreedySearchEncoderDecoderOutput( sequences=generated, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return TFGreedySearchDecoderOnlyOutput( sequences=generated, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return generated def sample( self, input_ids: tf.Tensor, logits_processor: Optional[TFLogitsProcessorList] = None, logits_warper: Optional[TFLogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, seed: Optional[Tuple[int, int]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[TFSampleOutput, tf.Tensor]: r""" Generates sequences for models with a language modeling head using multinomial sampling. Parameters: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. 
            seed (`List[int]`, *optional*):
                Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the
                `seed` argument from stateless functions in `tf.random`.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more details.
            output_hidden_states (`bool`, *optional*, defaults to `False`):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more details.
            output_scores (`bool`, *optional*, defaults to `False`):
                Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
            return_dict_in_generate (`bool`, *optional*, defaults to `False`):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            model_kwargs:
                Additional model specific kwargs will be forwarded to the `call` function of the model. If model is
                an encoder-decoder model the kwargs should include `encoder_outputs`.

        Return:
            [`~generation.TFSampleDecoderOnlyOutput`], [`~generation.TFSampleEncoderDecoderOutput`] or `tf.Tensor`: A
            `tf.Tensor` containing the generated tokens (default behaviour) or a
            [`~generation.TFSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
            `return_dict_in_generate=True` or a [`~generation.TFSampleEncoderDecoderOutput`] if
            `model.config.is_encoder_decoder=True`.

        Examples:

        ```python
        >>> import tensorflow as tf
        >>> from transformers import (
        ...     AutoTokenizer,
        ...     TFAutoModelForCausalLM,
        ...     TFLogitsProcessorList,
        ...     TFMinLengthLogitsProcessor,
        ...     TFTopKLogitsWarper,
        ...     TFTemperatureLogitsWarper,
        ... )

        >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
        >>> model = TFAutoModelForCausalLM.from_pretrained("gpt2")

        >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
        >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id

        >>> input_prompt = "Today is a beautiful day, and"
        >>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids

        >>> # instantiate logits processors
        >>> logits_processor = TFLogitsProcessorList(
        ...     [
        ...         TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
        ...     ]
        ... )
        >>> # instantiate logits warpers
        >>> logits_warper = TFLogitsProcessorList(
        ...     [
        ...         TFTopKLogitsWarper(50),
        ...         TFTemperatureLogitsWarper(0.7),
        ...     ]
        ... )

        >>> tf.random.set_seed(0)
        >>> outputs = model.sample(input_ids, logits_processor=logits_processor, logits_warper=logits_warper)

        >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
        ['Today is a beautiful day, and I love my country. But when I look at Donald Trump,']
        ```"""
        # 1. 
init greedy_search values logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList() max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache) use_xla = not tf.executing_eagerly() # TODO (Joao): fix cache format or find programatic way to detect cache index # GPT2 and other models has a slightly different cache structure, with a different batch axis model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0 # some models, like XLNet, need more than the last token in the presence of past_key_values needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys()) # 2. init `attentions`, `hidden_states`, and `scores` tuples scores = [] if (return_dict_in_generate and output_scores) else None decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None cross_attentions = [] if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None # 3. init tensors to use for "xla-compileable" generate function batch_size, cur_len = shape_list(input_ids) # initialize `generated` (pre-populated with `pad_token_id`), `finished_sequences` input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0) generated = tf.concat([input_ids, input_ids_padding], axis=-1) finished_sequences = tf.zeros((batch_size,), dtype=tf.bool) # 4. 
define "xla-compile-able" stop-condition and auto-regressive function def sample_cond_fn(generated, finished_sequences, cur_len, model_kwargs): return ~tf.reduce_all(finished_sequences) def sample_body_fn(generated, finished_sequences, cur_len, model_kwargs): if model_kwargs.get("past_key_values") is None or needs_full_input: input_ids = generated[:, :cur_len] else: input_ids = tf.expand_dims(generated[:, cur_len - 1], -1) model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs) # forward pass to get next token logits model_outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) next_token_logits = model_outputs.logits[:, -1] # pre-process distribution next_tokens_scores = logits_processor(generated, next_token_logits, cur_len) next_tokens_scores = logits_warper(generated, next_tokens_scores, cur_len) # Store scores, attentions and hidden_states when required if not use_xla and return_dict_in_generate: if output_scores: scores.append(next_tokens_scores) if output_attentions and self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.decoder_attentions) elif output_attentions and not self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.attentions) if self.config.is_encoder_decoder: cross_attentions.append(model_outputs.cross_attentions) if output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.decoder_hidden_states) elif output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.hidden_states) # sample if seed is not None: sample_seed = seed else: sample_seed = tf.experimental.numpy.random.randint(tf.int32.min, tf.int32.max, (2,), dtype=tf.int32) next_tokens = tf.squeeze( tf.random.stateless_categorical( logits=next_tokens_scores, num_samples=1, seed=sample_seed, dtype=tf.int32 ), axis=1, ) if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) next_token_is_eos = tf.math.reduce_any( tf.equal( tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) ), axis=0, ) finished_sequences = finished_sequences | next_token_is_eos # update `generated` and `cur_len` update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) cur_len += 1 # update model_kwargs if use_xla: model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=model_outputs, model_kwargs=model_kwargs, cur_len=cur_len, max_length=max_length, batch_size=batch_size, is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: model_kwargs = self._update_model_kwargs_for_generation( model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if we don't cache past_key_values key values we need the whole input if model_kwargs.get("past_key_values", None) is None: # let's throw out `past_key_values` since we don't want `None` tensors model_kwargs.pop("past_key_values", None) return generated, finished_sequences, cur_len, model_kwargs # 5. 
run generation # 1st generation step has to be run before to initialize `past_key_values` generated, finished_sequences, cur_len, model_kwargs = sample_body_fn( generated, finished_sequences, cur_len, model_kwargs ) # 2-to-n generation steps can then be run in autoregressive fashion # only in case 1st generation step does NOT yield EOS token though maximum_iterations = max_length - cur_len generated, _, cur_len, _ = tf.while_loop( sample_cond_fn, sample_body_fn, (generated, finished_sequences, cur_len, model_kwargs), maximum_iterations=maximum_iterations, ) # 6. prepare outputs if not use_xla: # cut for backward compatibility generated = generated[:, :cur_len] if return_dict_in_generate: if self.config.is_encoder_decoder: # if model is an encoder-decoder, retrieve encoder attention weights # and hidden states encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) scores = tuple(scores) if scores is not None else None decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None return TFSampleEncoderDecoderOutput( sequences=generated, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return TFSampleDecoderOnlyOutput( sequences=generated, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return generated @staticmethod def _gather_beams(nested, beam_indices, batch_axis=0): """Gathers the beam slices indexed by beam_indices into new beam array.""" def gather_fn(tensor): if batch_axis > 0: # pushes all dimentions before the batch to the end, so we get (batch, beam_id, ...) perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0) tensor = tf.transpose(tensor, perm=perm) gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1) if batch_axis > 0: # transposes back to the original dimensions perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0) perm = tf.math.invert_permutation(perm) gathered_tensor = tf.transpose(gathered_tensor, perm=perm) return gathered_tensor return tf.nest.map_structure(gather_fn, nested) def beam_search( self, input_ids: tf.Tensor, do_sample: bool = False, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, early_stopping: Optional[Union[bool, str]] = None, logits_processor: Optional[TFLogitsProcessorList] = None, logits_warper: Optional[TFLogitsProcessorList] = None, num_return_sequences: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]: r""" Generates sequences for models with a language modeling head using beam search. If `do_sample` is `False`, uses a greedy approach, otherwise does multinomial sampling without replacement. 
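        In short, each beam keeps a running sum of token log-probabilities; at every step the best
        `2 * num_beams` continuations are gathered, newly finished hypotheses are set aside, and candidate
        scores are normalized by `sequence_length ** length_penalty` before the final beams are selected.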
Parameters: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. do_sample (`bool`, *optional*, defaults to `False`): Whether or not to use sampling ; use greedy decoding otherwise. max_length (`int`, *optional*, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. length_penalty (`float`, *optional*, defaults to 1.0): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences. early_stopping (`bool` or `str`, *optional*, defaults to `False`): Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values: `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an heuristic is applied and the generation stops when is it very unlikely to find better candidates; `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). logits_processor (`[TFLogitsProcessorList]`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. num_return_sequences(`int`, *optional*, defaults to 1): The number of independently computed returned sequences for each element in the batch. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. model_kwargs: Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation.TFBeamSearchDecoderOnlyOutput`], [`~generation.TFBeamSearchEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a [`~generation.TFBeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.TFBeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... TFAutoModelForSeq2SeqLM, ... TFLogitsProcessorList, ... TFMinLengthLogitsProcessor, ... 
) >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="tf").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = tf.ones((1, num_beams, 1), dtype=tf.int32) >>> input_ids = input_ids * model.generation_config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> encoder_outputs = model.get_encoder()(encoder_input_ids, return_dict=True) >>> encoder_outputs.last_hidden_state = tf.repeat( ... tf.expand_dims(encoder_outputs.last_hidden_state, axis=0), num_beams, axis=1 ... ) >>> model_kwargs = {"encoder_outputs": encoder_outputs} >>> # instantiate logits processors >>> logits_processor = TFLogitsProcessorList( ... [TFMinLengthLogitsProcessor(5, eos_token_id=model.generation_config.eos_token_id)] ... ) >>> outputs = model.beam_search(input_ids, logits_processor=logits_processor, **model_kwargs) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" def flatten_beam_dim(tensor, batch_axis=0): """Flattens the first two dimensions of a non-scalar array.""" shape = shape_list(tensor) return tf.reshape( tensor, shape[:batch_axis] + [shape[batch_axis] * shape[batch_axis + 1]] + shape[batch_axis + 2 :], ) def unflatten_beam_dim(tensor, num_beams, batch_axis=0): """Unflattens the first, flat batch*beam dimension of a non-scalar array.""" shape = shape_list(tensor) return tf.reshape(tensor, shape[:batch_axis] + [-1, num_beams] + shape[batch_axis + 1 :]) # 1. init beam_search values logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList() max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences ) output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) output_scores = output_scores if output_scores is not None else self.generation_config.output_scores return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache) use_xla = not tf.executing_eagerly() # TODO (Joao): fix cache format or find programatic way to detect cache index # GPT2 and other models has a slightly different cache structure, with a different batch axis model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) cache_batch_axis = 1 if any(model_prefix in model_name for 
model_prefix in ("TFGPT2", "TFCTRL")) else 0 # some models, like XLNet, need more than the last token in the presence of past_key_values needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys()) # 2. init `attentions`, `hidden_states`, and `scores` tuples all_scores = [] if (return_dict_in_generate and output_scores) else None decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None cross_attentions = [] if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None # 3. init tensors to use for "xla-compileable" generate function batch_size, num_beams, cur_len = shape_list(input_ids) # per batch, beam-item holding current token in loop, pre-populated with `pad_token_id` input_ids_padding = tf.ones((batch_size, num_beams, max_length - cur_len), dtype=tf.int32) * ( pad_token_id or 0 ) running_sequences = tf.concat([input_ids, input_ids_padding], axis=-1) sequences = tf.ones((batch_size, num_beams, max_length), dtype=tf.int32) * (pad_token_id or 0) # per batch,beam-item state bit indicating if sentence has finished. is_sent_finished = tf.zeros((batch_size, num_beams), dtype=tf.bool) # per batch, beam-item score, logprobs running_scores = tf.tile( tf.expand_dims(tf.convert_to_tensor([0.0] + [-1.0e9] * (num_beams - 1)), axis=0), [batch_size, 1] ) scores = tf.ones((batch_size, num_beams)) * -1.0e9 # per batch beam indices running_beam_indices = tf.ones((batch_size, num_beams, max_length), dtype=tf.int32) * -1 beam_indices = tf.ones((batch_size, num_beams, max_length), dtype=tf.int32) * -1 # flatten beam dim if "encoder_outputs" in model_kwargs: model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim( model_kwargs["encoder_outputs"]["last_hidden_state"] ) if "attention_mask" in model_kwargs: model_kwargs["attention_mask"] = flatten_beam_dim(model_kwargs["attention_mask"]) # 4. define "xla-compile-able" stop-condition and auto-regressive function # define stop-condition and auto-regressive function def beam_search_cond_fn( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, model_kwargs, ): """ Beam Search termination condition function -- halts the generation loop if any of these conditions becomes False """ # 1. is less than max length? not_max_length_yet = cur_len < max_length # 2. can the new beams still improve? # early_stopping == False -> apply heuristic = always get the best score from `cur_len`. See the discussion # below for more details. # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there. if early_stopping == "never" and length_penalty > 0.0: best_running_score = running_scores[:, :1] / (max_length**length_penalty) else: best_running_score = running_scores[:, :1] / (tf.cast(cur_len, dtype=tf.float32) ** length_penalty) worst_finished_score = tf.where( is_sent_finished, tf.math.reduce_min(scores, axis=1, keepdims=True), -1.0e9 ) improvement_still_possible = tf.math.reduce_any(best_running_score > worst_finished_score) # 3. is there still a beam that has not finished? 
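            # Note: `early_stopping is True` is an identity check, so when `early_stopping` is `False` or
            # `"never"` this term never closes the loop on its own -- stopping then relies on the max-length
            # check (1.) and on the "can the new beams still improve?" heuristic (2.) above.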
still_open_beam = ~(tf.math.reduce_all(is_sent_finished) & (early_stopping is True)) return not_max_length_yet & still_open_beam & improvement_still_possible def beam_search_body_fn( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, model_kwargs, ): """ Beam Search iterative update function -- each iteration adds a new token and updates the best sequences seen so far """ # 1. Forward current tokens if model_kwargs.get("past_key_values") is None or needs_full_input: input_ids = running_sequences[:, :, :cur_len] else: input_ids = tf.expand_dims(running_sequences[:, :, cur_len - 1], -1) model_inputs = self.prepare_inputs_for_generation( flatten_beam_dim(input_ids), use_cache=use_cache, **model_kwargs ) model_outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) logits = unflatten_beam_dim(model_outputs.logits[:, -1], num_beams) # 2. Compute log probs # get log probabilities from logits, process logits with processors (*e.g.* min_length, ...), and # add new logprobs to existing running logprobs scores. log_probs = tf.nn.log_softmax(logits) log_probs = logits_processor(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len) log_probs = unflatten_beam_dim(log_probs, num_beams) log_probs_processed = log_probs log_probs = log_probs + tf.expand_dims(running_scores, axis=2) if do_sample: # Note: logits warpers are intentionally applied after adding running beam scores. On some logits # warpers (like top_p) this is indiferent, but on others (like temperature) it is not. For reference, # see https://github.com/huggingface/transformers/pull/5420#discussion_r449779867 log_probs = logits_warper(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len) log_probs = unflatten_beam_dim(log_probs, num_beams) vocab_size = log_probs.shape[2] log_probs = tf.reshape(log_probs, (batch_size, num_beams * vocab_size)) # Store scores, attentions and hidden_states when required if not use_xla and return_dict_in_generate: if output_scores: all_scores.append( logits_warper( flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs_processed), cur_len ) ) if output_attentions and self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.decoder_attentions) elif output_attentions and not self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.attentions) if self.config.is_encoder_decoder: cross_attentions.append(model_outputs.cross_attentions) if output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.decoder_hidden_states) elif output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.hidden_states) # 3. Retrieve top-K # Each item in batch has num_beams * vocab_size candidate sequences. For each item, get the top 2*k # candidates with the highest log-probabilities. We gather the top 2*K beams here so that even if the # best K sequences reach EOS simultaneously, we have another K sequences remaining to continue the live # beam search. # Gather the top 2*K scores from _all_ beams. # Gather 2*k top beams. # Recover the beam index by floor division. # Recover token id by modulo division and expand Id array for broadcasting. # Update sequences for the 2*K top-k new sequences. 
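            # Illustrative shapes (hypothetical numbers): with batch_size=2, num_beams=3 and vocab_size=8,
            # `log_probs` was reshaped above to (2, 24); keeping 2 * 3 = 6 candidates per batch item ensures
            # that even if the best 3 candidates all end in EOS, 3 live continuations remain for the next step.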
beams_to_keep = 2 * num_beams if do_sample: topk_indices = sample_without_replacement(log_probs, beams_to_keep) topk_log_probs = tf.gather(log_probs, topk_indices, axis=1, batch_dims=1) else: topk_log_probs, topk_indices = tf.math.top_k(log_probs, k=beams_to_keep) topk_current_beam_indices = topk_indices // vocab_size topk_running_beam_indices = self._gather_beams(running_beam_indices, topk_current_beam_indices) topk_running_sequences = self._gather_beams(running_sequences, topk_current_beam_indices) topk_ids = topk_indices % vocab_size # writes the new token indices_batch = tf.repeat(tf.range(batch_size), [beams_to_keep]) indices_beam = tf.tile(tf.range(beams_to_keep), [batch_size]) update_indices = tf.stack( [indices_batch, indices_beam, tf.broadcast_to(cur_len, [batch_size * beams_to_keep])], axis=-1 ) topk_sequences = tf.tensor_scatter_nd_update( tensor=topk_running_sequences, indices=update_indices, updates=tf.reshape(topk_ids, [batch_size * beams_to_keep]), ) # we want to store the beam indices with batch information -> real beam index = beam index % num beams batch_modified_indices = topk_current_beam_indices + tf.broadcast_to( tf.expand_dims(tf.range(batch_size) * num_beams, axis=1), topk_current_beam_indices.shape ) topk_beam_indices = tf.tensor_scatter_nd_update( tensor=topk_running_beam_indices, indices=update_indices, updates=tf.reshape(batch_modified_indices, [batch_size * beams_to_keep]), ) # 4. Check which sequences have ended # Update current sequences: Did the top `num_beams` sequences reach an end marker? # To prevent these just finished sequences from being added to the current sequences # set of active beam search sequences, set their log probs to a very large negative value. if eos_token_id is None: eos_in_next_token = tf.zeros(topk_sequences[:, :, cur_len].shape, dtype=tf.bool) else: eos_in_next_token = tf.math.reduce_any( tf.equal( tf.broadcast_to( topk_sequences[:, :, cur_len], [len(eos_token_id)] + topk_sequences[:, :, cur_len].shape ), tf.expand_dims(tf.expand_dims(eos_token_id, -1), -1), ), axis=0, ) did_topk_just_finished = eos_in_next_token & tf.broadcast_to( tf.concat((tf.ones((num_beams), dtype=tf.bool), tf.zeros((num_beams), dtype=tf.bool)), axis=0), shape_list(eos_in_next_token), ) # non-top `num_beams` eos tokens can't be used to finish a beam, but the others can't be used in the next # running sentences either running_topk_log_probs = topk_log_probs + tf.cast(eos_in_next_token, tf.float32) * -1.0e9 # 5. Get running sequences scores for next # Determine the top k beam indices (from top 2*k beams) from log probs and gather top k beams # (from top 2*k beams). next_topk_indices = tf.math.top_k(running_topk_log_probs, k=num_beams)[1] next_running_sequences, next_running_scores, next_running_beam_indices = self._gather_beams( [topk_sequences, running_topk_log_probs, topk_beam_indices], next_topk_indices ) # 6. Process topk logits # Further process log probs: # - add length penalty # - make sure no scores can be added anymore if beam is full # - make sure still running sequences cannot be chosen as finalized beam topk_log_probs = topk_log_probs / (tf.cast(cur_len, dtype=tf.float32) ** length_penalty) beams_in_batch_are_full = tf.broadcast_to( tf.math.reduce_all(is_sent_finished, axis=-1, keepdims=True), shape_list(did_topk_just_finished) ) & (early_stopping is True) add_penalty = ~did_topk_just_finished | beams_in_batch_are_full topk_log_probs += tf.cast(add_penalty, tf.float32) * -1.0e9 # 7. Get scores, sequences, is sentence finished for next. 
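            # (Shape note: `sequences`/`scores` hold `num_beams` previously finished candidates and the
            #  `topk_*` tensors hold `2 * num_beams` fresh ones, so the merged tensors below have
            #  `3 * num_beams` entries per batch item before the best `num_beams` are re-selected.)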
# Combine sequences, scores, and flags along the beam dimension and compare new finished sequence scores # to existing finished scores and select the best from the new set of beams merged_sequences = tf.concat([sequences, topk_sequences], axis=1) merged_scores = tf.concat([scores, topk_log_probs], axis=1) merged_beams = tf.concat([beam_indices, topk_beam_indices], axis=1) merged_is_sent_finished = tf.concat([is_sent_finished, did_topk_just_finished], axis=1) topk_merged_indices = tf.math.top_k(merged_scores, k=num_beams)[1] next_sequences, next_scores, next_beam_indices, next_is_sent_finished = self._gather_beams( [merged_sequences, merged_scores, merged_beams, merged_is_sent_finished], topk_merged_indices ) # 8. Prepare data for the next iteration # Determine the top k beam indices from the original set of all beams. With these, gather the top k # beam-associated caches. cur_len = cur_len + 1 if "past_key_values" in model_outputs: cache = tf.nest.map_structure( lambda tensor: unflatten_beam_dim(tensor, num_beams, batch_axis=cache_batch_axis), model_outputs.past_key_values, ) next_running_indices = self._gather_beams(topk_current_beam_indices, next_topk_indices) next_cache = self._gather_beams(cache, next_running_indices, batch_axis=cache_batch_axis) model_outputs["past_key_values"] = tf.nest.map_structure( lambda tensor: flatten_beam_dim(tensor, batch_axis=cache_batch_axis), next_cache ) if use_xla: next_model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=model_outputs, model_kwargs=model_kwargs, cur_len=cur_len, max_length=max_length, batch_size=(batch_size * num_beams), is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: next_model_kwargs = self._update_model_kwargs_for_generation( model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if we don't cache past_key_values key values we need the whole input if model_kwargs.get("past_key_values", None) is None: # let's throw out `past_key_values` since we don't want `None` tensors model_kwargs.pop("past_key_values", None) return ( cur_len, next_running_sequences, next_running_scores, next_running_beam_indices, next_sequences, next_scores, next_beam_indices, next_is_sent_finished, next_model_kwargs, ) # 5. run generation # 1st generation step has to be run before to initialize `past_key_values` (if active) ( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, model_kwargs, ) = beam_search_body_fn( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, model_kwargs, ) # 2-to-n generation steps can then be run in autoregressive fashion (only in case 1st generation step does # NOT yield EOS token though) maximum_iterations = max_length - cur_len ( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, _, ) = tf.while_loop( beam_search_cond_fn, beam_search_body_fn, ( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, model_kwargs, ), maximum_iterations=maximum_iterations, ) # 6. prepare outputs # Account for the edge-case where there are no finished sequences for a particular batch item. If so, return # running sequences for that batch item. 
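        # Despite its name, `none_finished` is True for batch items with *at least one* finished beam
        # (tf.math.reduce_any); those items keep their finalized `sequences`/`scores`, while items without any
        # finished beam fall back to the running versions below.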
none_finished = tf.math.reduce_any(is_sent_finished, axis=1) sequences = tf.where(none_finished[:, None, None], sequences, running_sequences) beam_indices = tf.where(none_finished[:, None, None], beam_indices, running_beam_indices) # Apply the length penalty so that running scores match the finalized scores if they are used running_scores = running_scores / (tf.cast(cur_len, dtype=tf.float32) ** length_penalty) scores = tf.where(none_finished[:, None], scores, running_scores) # Take best beams for each batch (the score is sorted in descending order) sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :]) scores = flatten_beam_dim(scores[:, :num_return_sequences]) beam_indices = flatten_beam_dim(beam_indices[:, :num_return_sequences, :]) if not use_xla: # Cut for backward compatibility sequences = sequences[:, :cur_len] beam_indices = beam_indices[:, :cur_len] if return_dict_in_generate: if self.config.is_encoder_decoder: # if model is an encoder-decoder, retrieve encoder attention weights and hidden states encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) output_cls = TFBeamSampleEncoderDecoderOutput if do_sample else TFBeamSearchEncoderDecoderOutput return output_cls( sequences=sequences, sequences_scores=scores, scores=all_scores, beam_indices=beam_indices, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: output_cls = TFBeamSampleDecoderOnlyOutput if do_sample else TFBeamSearchDecoderOnlyOutput return output_cls( sequences=sequences, sequences_scores=scores, scores=all_scores, beam_indices=beam_indices, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequences def contrastive_search( self, input_ids: tf.Tensor, top_k: Optional[int] = 1, penalty_alpha: Optional[float] = 0, logits_processor: Optional[TFLogitsProcessorList] = None, logits_warper: Optional[TFLogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[TFContrastiveSearchOutput, tf.Tensor]: r""" Generates sequences of token ids for models with a language modeling head using **contrastive search** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. top_k (`int`, *optional*, defaults to 1): The size of the candidate set that is used to re-rank for contrastive search penalty_alpha (`float`, *optional*, defaults to 0): The degeneration penalty for contrastive search; activate when it is larger than 0 logits_processor (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. 
List of instances of class derived from [`TFLogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. model_kwargs: Additional model specific keyword arguments will be forwarded to the `call` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation.TFContrastiveSearchDecoderOnlyOutput`], [`~generation.TFContrastiveSearchEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a [`~generation.TFContrastiveySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.TFContrastiveSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import AutoTokenizer, TFAutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") >>> model = TFAutoModelForCausalLM.from_pretrained("facebook/opt-125m") >>> # set pad_token_id to eos_token_id because OPT does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "DeepMind Company is" >>> input_ids = tokenizer(input_prompt, return_tensors="tf") >>> outputs = model.contrastive_search(**input_ids, penalty_alpha=0.6, top_k=4, max_length=64) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it'] ```""" def gather_best_candidate(nested, selected_idx_stacked, batch_axis=0): """Gathers the slices indexed by selected_idx_stacked from a potentially nested structure of tensors.""" def gather_fn(tensor): gathered_tensor = tf.gather(params=tensor, indices=selected_idx_stacked, axis=batch_axis) return gathered_tensor return tf.nest.map_structure(gather_fn, nested) # 1. 
init greedy_search values logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList() max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) use_cache = True # In contrastive search, we always use cache model_kwargs.pop("use_cache", None) use_xla = not tf.executing_eagerly() # TODO (Joao): fix cache format or find programatic way to detect cache index # GPT2 and other models has a slightly different cache structure, with a different batch axis model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0 # 2. init `attentions`, `hidden_states`, and `scores` tuples scores = [] if (return_dict_in_generate and output_scores) else None decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None cross_attentions = [] if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None # 3. init tensors to use for "xla-compileable" generate function batch_size, cur_len = shape_list(input_ids) # initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences` input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0) generated = tf.concat([input_ids, input_ids_padding], axis=-1) finished_sequences = tf.zeros((batch_size,), dtype=tf.bool) # 4. 
define "xla-compile-able" stop-condition and auto-regressive function # define condition fn def contrastive_search_cond_fn( generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables ): """state termination condition fn.""" return ~tf.reduce_all(finished_sequences) # define condition fn def contrastive_search_body_fn( generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables ): """state update fn.""" # if the first step in the loop, encode all the prefix and obtain: (1) past_key_values; # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step if model_kwargs.get("past_key_values") is None: # prepare inputs model_inputs = self.prepare_inputs_for_generation( generated[:, :cur_len], use_cache=use_cache, **model_kwargs ) # encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save # the `encoder_outputs` outputs = self( **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with # previous tokens) if self.config.is_encoder_decoder: last_hidden_states = outputs.decoder_hidden_states[-1] else: last_hidden_states = outputs.hidden_states[-1] # XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across # iterations (with fixed shapes) if use_xla: last_hidden_states = tf.pad(last_hidden_states, [[0, 0], [0, max_length - cur_len], [0, 0]]) # next logit for contrastive search to select top-k candidate tokens logit_for_next_step = outputs.logits[:, -1, :] if use_xla: model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=outputs, model_kwargs=model_kwargs, cur_len=cur_len, max_length=max_length, batch_size=batch_size, is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # Expands model inputs top_k times, for batched forward passes (akin to beam search). _, model_kwargs = self._expand_inputs_for_generation( expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) past_key_values = model_kwargs.get("past_key_values") if past_key_values is None: raise ValueError( f"{self.__class__.__name__} does not support caching and therefore **can't** be used " "for contrastive search." ) elif ( not isinstance(past_key_values[0], (tuple, tf.Tensor)) or past_key_values[0][0].shape[0] != batch_size ): raise ValueError( f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be " "used for contrastive search without further modifications." 
) else: logit_for_next_step = next_step_cached_variables["logit_for_next_step"] last_hidden_states = next_step_cached_variables["last_hidden_states"] outputs = next_step_cached_variables["outputs"] # contrastive_search main logic start: # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by # degeneration penalty logit_for_next_step = logits_processor(generated, logit_for_next_step, cur_len) logit_for_next_step = logits_warper(generated, logit_for_next_step, cur_len) next_probs = stable_softmax(logit_for_next_step, axis=-1) top_k_probs, top_k_ids = tf.math.top_k(next_probs, k=top_k) # Store scores, attentions and hidden_states when required if not use_xla and return_dict_in_generate: if output_scores: scores.append(logit_for_next_step) if output_attentions and self.config.is_encoder_decoder: decoder_attentions.append(outputs.decoder_attentions) elif output_attentions and not self.config.is_encoder_decoder: decoder_attentions.append(outputs.attentions) if self.config.is_encoder_decoder: cross_attentions.append(outputs.cross_attentions) if output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(outputs.decoder_hidden_states) elif output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(outputs.hidden_states) # Replicates the new past_key_values to match the `top_k` candidates model_kwargs["past_key_values"] = tf.nest.map_structure( lambda tensor: tf.repeat(tensor, top_k, axis=cache_batch_axis), model_kwargs["past_key_values"] ) # compute the candidate tokens by the language model and collects their hidden_states next_model_inputs = self.prepare_inputs_for_generation( tf.reshape(top_k_ids, [-1, 1]), use_cache=use_cache, **model_kwargs ) outputs = self( **next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) next_past_key_values = self._extract_past_from_model_output(outputs) logits = outputs.logits[:, -1, :] # name is different for encoder-decoder and decoder-only models if self.config.is_encoder_decoder: next_hidden = outputs.decoder_hidden_states[-1] full_hidden_states = outputs.decoder_hidden_states else: next_hidden = outputs.hidden_states[-1] full_hidden_states = outputs.hidden_states context_hidden = tf.repeat(last_hidden_states[:, :cur_len, :], top_k, axis=0) # compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the # model confidence selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k) # converts indices to a dimension of top_k to the stacked top_k * batch_size dimension, for indexing # without a need to reshape on tensors that have these two dimensions stacked selected_idx_stacked = selected_idx + tf.range(selected_idx.shape[0], dtype=tf.int64) * top_k # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores # (model confidence minus degeneration penalty); (6) decoder hidden_states next_tokens = tf.gather(top_k_ids, selected_idx, axis=1, batch_dims=1) next_hidden = gather_best_candidate(next_hidden, selected_idx_stacked) # XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across # iterations (with fixed shapes) if use_xla: last_hidden_states = dynamic_update_slice(last_hidden_states, next_hidden, [0, cur_len, 0]) else: last_hidden_states = 
tf.concat([last_hidden_states, next_hidden], axis=1) next_decoder_hidden_states = gather_best_candidate(full_hidden_states, selected_idx_stacked) next_past_key_values = gather_best_candidate( next_past_key_values, selected_idx_stacked, batch_axis=cache_batch_axis ) logit_for_next_step = gather_best_candidate(logits, selected_idx_stacked) # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration if self.config.is_encoder_decoder: next_step_cross_attentions = () next_step_decoder_attentions = () if output_attentions: next_step_cross_attentions = gather_best_candidate(outputs.cross_attentions, selected_idx_stacked) next_step_decoder_attentions = gather_best_candidate( outputs.decoder_attentions, selected_idx_stacked ) outputs = TFSeq2SeqLMOutput( past_key_values=next_past_key_values, decoder_hidden_states=next_decoder_hidden_states, decoder_attentions=next_step_decoder_attentions or None, cross_attentions=next_step_cross_attentions or None, ) else: next_step_attentions = () if output_attentions: next_step_attentions = gather_best_candidate(outputs.attentions, selected_idx_stacked) outputs = TFCausalLMOutputWithPast( past_key_values=next_past_key_values, hidden_states=next_decoder_hidden_states, attentions=next_step_attentions or None, ) # contrastive_search main logic end if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) next_token_is_eos = tf.math.reduce_any( tf.equal( tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) ), axis=0, ) finished_sequences = finished_sequences | next_token_is_eos # update `generated` and `cur_len` update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) cur_len += 1 if use_xla: # NOTE: 1) relative to other generation strategies, contrastive search is always running forward # passes one step ahead -- hence the `cur_len=cur_len + 1`; 2) the attention mask here is expanded from # [batch_size, ...] to [batch_size*top_k, ...] -- hence the `batch_size=batch_size * top_k` model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=outputs, model_kwargs=model_kwargs, cur_len=cur_len + 1, max_length=max_length, batch_size=batch_size * top_k, is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) next_step_cached_variables = { "logit_for_next_step": logit_for_next_step, "last_hidden_states": last_hidden_states, "outputs": outputs, } return generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables # 5. 
run generation # 1st generation step has to be run before to initialize `past_key_values` generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables = contrastive_search_body_fn( generated, finished_sequences, cur_len, model_kwargs, None ) # 2-to-n generation steps can then be run in autoregressive fashion # only in case 1st generation step does NOT yield EOS token though maximum_iterations = max_length - cur_len generated, _, cur_len, _, _ = tf.while_loop( contrastive_search_cond_fn, contrastive_search_body_fn, (generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables), maximum_iterations=maximum_iterations, ) # 6. prepare outputs if not use_xla: # cut for backward compatibility generated = generated[:, :cur_len] if return_dict_in_generate: if self.config.is_encoder_decoder: # if model is an encoder-decoder, retrieve encoder attention weights # and hidden states encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) scores = tuple(scores) if scores is not None else None decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None return TFContrastiveSearchEncoderDecoderOutput( sequences=generated, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return TFContrastiveSearchDecoderOnlyOutput( sequences=generated, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return generated def tf_top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1): """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering) top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimumber of tokens we keep per batch example in the output. 
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ logits_shape = shape_list(logits) if top_k > 0: top_k = min(max(top_k, min_tokens_to_keep), logits_shape[-1]) # Safety check # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = logits < tf.math.top_k(logits, k=top_k)[0][..., -1, None] logits = tf.where(indices_to_remove, filter_value, logits) if top_p < 1.0: sorted_indices = tf.argsort(logits, direction="DESCENDING") sorted_logits = tf.gather( logits, sorted_indices, axis=-1, batch_dims=1 ) # expects logits to be of dim (batch_size, vocab_size) cumulative_probs = tf.math.cumsum(stable_softmax(sorted_logits, axis=-1), axis=-1) # Remove tokens with cumulative probability above the threshold (token with 0 are kept) sorted_indices_to_remove = cumulative_probs > top_p if min_tokens_to_keep > 1: # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) sorted_indices_to_remove = tf.concat( [ tf.zeros_like(sorted_indices_to_remove[:, :min_tokens_to_keep]), sorted_indices_to_remove[:, min_tokens_to_keep:], ], -1, ) # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove = tf.concat( [tf.zeros_like(sorted_indices_to_remove[:, :1]), sorted_indices_to_remove[:, :-1]], -1, ) # scatter sorted tensors to original indexing indices_to_remove = scatter_values_on_batch_indices(sorted_indices_to_remove, sorted_indices) logits = tf.where(indices_to_remove, filter_value, logits) return logits def scatter_values_on_batch_indices(values, batch_indices): shape = shape_list(batch_indices) # broadcast batch dim to shape broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1]) # transform batch_indices to pair_indices pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0)) # scatter values to pair indices return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape) def sample_without_replacement(logits, num_samples): """ categorical sampling without replacement is currently not implemented the gumbel-max trick will do for now see https://github.com/tensorflow/tensorflow/issues/9260 for more info """ z = -tf.math.log(-tf.math.log(tf.random.uniform(shape_list(logits), 0, 1))) _, indices = tf.nn.top_k(logits + z, num_samples) return indices def _ranking_fast( context_hidden: tf.Tensor, next_hidden: tf.Tensor, next_top_k_probs: tf.Tensor, alpha: float, beam_width: int, ) -> tf.Tensor: """ Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each row in the batch. """ norm_context_hidden = context_hidden / tf.norm(context_hidden, axis=2, keepdims=True) norm_next_hidden = next_hidden / tf.norm(next_hidden, axis=2, keepdims=True) cosine_matrix = tf.squeeze(tf.linalg.matmul(norm_context_hidden, norm_next_hidden, transpose_b=True), axis=-1) degeneration_penalty = tf.reduce_max(cosine_matrix, axis=-1) next_top_k_probs = tf.reshape(next_top_k_probs, shape=[-1]) contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty contrastive_score = tf.reshape(contrastive_score, shape=[-1, beam_width]) selected_idx = tf.argmax(contrastive_score, axis=1) return selected_idx
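# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal, hedged
# example of combining the `tf_top_k_top_p_filtering` helper defined above with
# categorical sampling. The toy logits and the `_demo_top_k_top_p_sampling`
# name are assumptions made purely for demonstration.
def _demo_top_k_top_p_sampling():
    # (batch_size=1, vocab_size=5) logits with made-up values
    toy_logits = tf.constant([[0.1, 0.4, 2.0, 3.0, 0.2]])
    # Keep at most 2 candidates and drop the low-probability tail of the nucleus
    filtered_logits = tf_top_k_top_p_filtering(toy_logits, top_k=2, top_p=0.95)
    # Tokens outside the kept set are now -inf, so they can never be sampled
    sampled_ids = tf.random.categorical(filtered_logits, num_samples=1)
    return sampled_ids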
file_length: 177,962
avg_line_length: 55.03369
max_line_length: 327
extension_type: py

repo: transformers
file: transformers-main/src/transformers/generation/configuration_utils.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Generation configuration class and utilities.""" import copy import json import os import warnings from typing import Any, Dict, Optional, Union from .. import __version__ from ..configuration_utils import PretrainedConfig from ..utils import ( GENERATION_CONFIG_NAME, PushToHubMixin, cached_file, download_url, extract_commit_hash, is_remote_url, logging, ) logger = logging.get_logger(__name__) class GenerationConfig(PushToHubMixin): r""" Class that holds a configuration for a generation task. A `generate` call supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models: - *greedy decoding* by calling [`~generation.GenerationMixin.greedy_search`] if `num_beams=1` and `do_sample=False` - *contrastive search* by calling [`~generation.GenerationMixin.contrastive_search`] if `penalty_alpha>0.` and `top_k>1` - *multinomial sampling* by calling [`~generation.GenerationMixin.sample`] if `num_beams=1` and `do_sample=True` - *beam-search decoding* by calling [`~generation.GenerationMixin.beam_search`] if `num_beams>1` and `do_sample=False` - *beam-search multinomial sampling* by calling [`~generation.GenerationMixin.beam_sample`] if `num_beams>1` and `do_sample=True` - *diverse beam-search decoding* by calling [`~generation.GenerationMixin.group_beam_search`], if `num_beams>1` and `num_beam_groups>1` - *constrained beam-search decoding* by calling [`~generation.GenerationMixin.constrained_beam_search`], if `constraints!=None` or `force_words_ids!=None` - *assisted decoding* by calling [`~generation.GenerationMixin.assisted_decoding`], if `assistant_model` is passed to `.generate()` You do not need to call any of the above methods directly. Pass custom parameter values to '.generate()'. To learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). Arg: > Parameters that control the length of the output max_length (`int`, *optional*, defaults to 20): The maximum length the generated tokens can have. Corresponds to the length of the input prompt + `max_new_tokens`. Its effect is overridden by `max_new_tokens`, if also set. max_new_tokens (`int`, *optional*): The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt. min_length (`int`, *optional*, defaults to 0): The minimum length of the sequence to be generated. Corresponds to the length of the input prompt + `min_new_tokens`. Its effect is overridden by `min_new_tokens`, if also set. min_new_tokens (`int`, *optional*): The minimum numbers of tokens to generate, ignoring the number of tokens in the prompt. early_stopping (`bool` or `str`, *optional*, defaults to `False`): Controls the stopping condition for beam-based methods, like beam-search. 
It accepts the following values: `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an heuristic is applied and the generation stops when is it very unlikely to find better candidates; `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). max_time(`float`, *optional*): The maximum amount of time you allow the computation to run for in seconds. generation will still finish the current pass after allocated time has been passed. > Parameters that control the generation strategy used do_sample (`bool`, *optional*, defaults to `False`): Whether or not to use sampling ; use greedy decoding otherwise. num_beams (`int`, *optional*, defaults to 1): Number of beams for beam search. 1 means no beam search. num_beam_groups (`int`, *optional*, defaults to 1): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. penalty_alpha (`float`, *optional*): The values balance the model confidence and the degeneration penalty in contrastive search decoding. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. > Parameters for manipulation of the model output logits temperature (`float`, *optional*, defaults to 1.0): The value used to modulate the next token probabilities. top_k (`int`, *optional*, defaults to 50): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`, *optional*, defaults to 1.0): If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. typical_p (`float`, *optional*, defaults to 1.0): Local typicality measures how similar the conditional probability of predicting a target token next is to the expected conditional probability of predicting a random token next, given the partial text already generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that add up to `typical_p` or higher are kept for generation. See [this paper](https://arxiv.org/pdf/2202.00666.pdf) for more details. epsilon_cutoff (`float`, *optional*, defaults to 0.0): If set to float strictly between 0 and 1, only tokens with a conditional probability greater than `epsilon_cutoff` will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language Model Desmoothing](https://arxiv.org/abs/2210.15191) for more details. eta_cutoff (`float`, *optional*, defaults to 0.0): Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between 0 and 1, a token is only considered if it is greater than either `eta_cutoff` or `sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits)))`. The latter term is intuitively the expected next token probability, scaled by `sqrt(eta_cutoff)`. In the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. See [Truncation Sampling as Language Model Desmoothing](https://arxiv.org/abs/2210.15191) for more details. diversity_penalty (`float`, *optional*, defaults to 0.0): This value is subtracted from a beam's score if it generates a token same as any beam from other group at a particular time. 
Note that `diversity_penalty` is only effective if `group beam search` is enabled. repetition_penalty (`float`, *optional*, defaults to 1.0): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. encoder_repetition_penalty (`float`, *optional*, defaults to 1.0): The paramater for encoder_repetition_penalty. An exponential penalty on sequences that are not in the original input. 1.0 means no penalty. length_penalty (`float`, *optional*, defaults to 1.0): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences. no_repeat_ngram_size (`int`, *optional*, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. bad_words_ids(`List[List[int]]`, *optional*): List of list of token ids that are not allowed to be generated. Check [`~generation.NoBadWordsLogitsProcessor`] for further documentation and examples. force_words_ids(`List[List[int]]` or `List[List[List[int]]]`, *optional*): List of token ids that must be generated. If given a `List[List[int]]`, this is treated as a simple list of words that must be included, the opposite to `bad_words_ids`. If given `List[List[List[int]]]`, this triggers a [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one can allow different forms of each word. renormalize_logits (`bool`, *optional*, defaults to `False`): Whether to renormalize the logits after applying all the logits processors or warpers (including the custom ones). It's highly recommended to set this flag to `True` as the search algorithms suppose the score logits are normalized but some logit processors or warpers break the normalization. constraints (`List[Constraint]`, *optional*): Custom constraints that can be added to the generation to ensure that the output will contain the use of certain tokens as defined by `Constraint` objects, in the most sensible way possible. forced_bos_token_id (`int`, *optional*, defaults to `model.config.forced_bos_token_id`): The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target language token. forced_eos_token_id (`Union[int, List[int]]`, *optional*, defaults to `model.config.forced_eos_token_id`): The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a list to set multiple *end-of-sequence* tokens. remove_invalid_values (`bool`, *optional*, defaults to `model.config.remove_invalid_values`): Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method to crash. Note that using `remove_invalid_values` can slow down generation. exponential_decay_length_penalty (`tuple(int, float)`, *optional*): This Tuple adds an exponentially increasing length penalty, after a certain amount of tokens have been generated. The tuple shall consist of: `(start_index, decay_factor)` where `start_index` indicates where penalty starts and `decay_factor` represents the factor of exponential decay suppress_tokens (`List[int]`, *optional*): A list of tokens that will be suppressed at generation. 
The `SupressTokens` logit processor will set their log probs to `-inf` so that they are not sampled. begin_suppress_tokens (`List[int]`, *optional*): A list of tokens that will be suppressed at the beginning of the generation. The `SupressBeginTokens` logit processor will set their log probs to `-inf` so that they are not sampled. forced_decoder_ids (`List[List[int]]`, *optional*): A list of pairs of integers which indicates a mapping from generation indices to token indices that will be forced before sampling. For example, `[[1, 123]]` means the second generated token will always be a token of index 123. sequence_bias (`Dict[Tuple[int], float]`, *optional*)): Dictionary that maps a sequence of tokens to its bias term. Positive biases increase the odds of the sequence being selected, while negative biases do the opposite. Check [`~generation.SequenceBiasLogitsProcessor`] for further documentation and examples. guidance_scale (`float`, *optional*): The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages the model to generate samples that are more closely linked to the input prompt, usually at the expense of poorer quality. > Parameters that define the output variables of `generate` num_return_sequences(`int`, *optional*, defaults to 1): The number of independently computed returned sequences for each element in the batch. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. > Special tokens that can be used at generation time pad_token_id (`int`, *optional*): The id of the *padding* token. bos_token_id (`int`, *optional*): The id of the *beginning-of-sequence* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. > Generation parameters exclusive to encoder-decoder models encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0): If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`. decoder_start_token_id (`int`, *optional*): If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token. > Wild card generation_kwargs: Additional generation kwargs will be forwarded to the `generate` function of the model. Kwargs that are not present in `generate`'s signature will be used in the model forward pass. 
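    Example (an illustrative sketch; the parameter values below are arbitrary and only meant to show the API):

    ```python
    >>> from transformers import GenerationConfig

    >>> # Build a sampling-oriented configuration from scratch
    >>> generation_config = GenerationConfig(do_sample=True, temperature=0.7, top_k=10, max_new_tokens=64)
    >>> generation_config.do_sample
    True
    ```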
""" def __init__(self, **kwargs): # Parameters that control the length of the output self.max_length = kwargs.pop("max_length", 20) self.max_new_tokens = kwargs.pop("max_new_tokens", None) self.min_length = kwargs.pop("min_length", 0) self.min_new_tokens = kwargs.pop("min_new_tokens", None) self.early_stopping = kwargs.pop("early_stopping", False) self.max_time = kwargs.pop("max_time", None) # Parameters that control the generation strategy used self.do_sample = kwargs.pop("do_sample", False) self.num_beams = kwargs.pop("num_beams", 1) self.num_beam_groups = kwargs.pop("num_beam_groups", 1) self.penalty_alpha = kwargs.pop("penalty_alpha", None) self.use_cache = kwargs.pop("use_cache", True) # Parameters for manipulation of the model output logits self.temperature = kwargs.pop("temperature", 1.0) self.top_k = kwargs.pop("top_k", 50) self.top_p = kwargs.pop("top_p", 1.0) self.typical_p = kwargs.pop("typical_p", 1.0) self.epsilon_cutoff = kwargs.pop("epsilon_cutoff", 0.0) self.eta_cutoff = kwargs.pop("eta_cutoff", 0.0) self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0) self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0) self.encoder_repetition_penalty = kwargs.pop("encoder_repetition_penalty", 1.0) self.length_penalty = kwargs.pop("length_penalty", 1.0) self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0) self.bad_words_ids = kwargs.pop("bad_words_ids", None) self.force_words_ids = kwargs.pop("force_words_ids", None) self.renormalize_logits = kwargs.pop("renormalize_logits", False) self.constraints = kwargs.pop("constraints", None) self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None) self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None) self.remove_invalid_values = kwargs.pop("remove_invalid_values", False) self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None) self.suppress_tokens = kwargs.pop("suppress_tokens", None) self.begin_suppress_tokens = kwargs.pop("begin_suppress_tokens", None) self.forced_decoder_ids = kwargs.pop("forced_decoder_ids", None) self.sequence_bias = kwargs.pop("sequence_bias", None) self.guidance_scale = kwargs.pop("guidance_scale", None) # Parameters that define the output variables of `generate` self.num_return_sequences = kwargs.pop("num_return_sequences", 1) self.output_attentions = kwargs.pop("output_attentions", False) self.output_hidden_states = kwargs.pop("output_hidden_states", False) self.output_scores = kwargs.pop("output_scores", False) self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False) # Special tokens that can be used at generation time self.pad_token_id = kwargs.pop("pad_token_id", None) self.bos_token_id = kwargs.pop("bos_token_id", None) self.eos_token_id = kwargs.pop("eos_token_id", None) # Generation parameters exclusive to encoder-decoder models self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0) self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None) # Wild card self.generation_kwargs = kwargs.pop("generation_kwargs", {}) # The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the the hub # interface. 
self._from_model_config = kwargs.pop("_from_model_config", False) self._commit_hash = kwargs.pop("_commit_hash", None) self.transformers_version = kwargs.pop("transformers_version", __version__) # Additional attributes without default values if not self._from_model_config: # we don't want to copy values from the model config if we're initializing a `GenerationConfig` from a # model's default configuration file for key, value in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error(f"Can't set {key} with value {value} for {self}") raise err # Validate the values of the attributes self.validate() def __eq__(self, other): if not isinstance(other, GenerationConfig): return False self_dict = self.__dict__.copy() other_dict = other.__dict__.copy() # ignore metadata for metadata_field in ("_from_model_config", "_commit_hash", "transformers_version"): self_dict.pop(metadata_field, None) other_dict.pop(metadata_field, None) return self_dict == other_dict def __repr__(self): return f"{self.__class__.__name__} {self.to_json_string()}" def validate(self): """ Validates the values of the attributes of the GenerationConfig instance, and raises a `ValueError` if any of the values are invalid. """ if self.early_stopping not in {True, False, "never"}: raise ValueError(f"`early_stopping` must be a boolean or 'never', but is {self.early_stopping}.") def save_pretrained( self, save_directory: Union[str, os.PathLike], config_file_name: Optional[Union[str, os.PathLike]] = None, push_to_hub: bool = False, **kwargs, ): r""" Save a generation configuration object to the directory `save_directory`, so that it can be re-loaded using the [`~GenerationConfig.from_pretrained`] class method. Args: save_directory (`str` or `os.PathLike`): Directory where the configuration JSON file will be saved (will be created if it does not exist). config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`): Name of the generation configuration JSON file to be saved in `save_directory`. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
""" config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME if os.path.isfile(save_directory): raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) output_config_file = os.path.join(save_directory, config_file_name) self.to_json_file(output_config_file, use_diff=True) logger.info(f"Configuration saved in {output_config_file}") if push_to_hub: self._upload_modified_files( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get("use_auth_token"), ) @classmethod def from_pretrained( cls, pretrained_model_name: Union[str, os.PathLike], config_file_name: Optional[Union[str, os.PathLike]] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", **kwargs, ) -> "GenerationConfig": r""" Instantiate a [`GenerationConfig`] from a generation configuration file. Args: pretrained_model_name (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a configuration file saved using the [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`. config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`): Name of the generation configuration JSON file to be loaded from `pretrained_model_name`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>". </Tip> return_unused_kwargs (`bool`, *optional*, defaults to `False`): If `False`, then this function returns just the final configuration object. 
If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the part of `kwargs` which has not been used to update `config` and is otherwise ignored. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. kwargs (`Dict[str, Any]`, *optional*): The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled by the `return_unused_kwargs` keyword parameter. Returns: [`GenerationConfig`]: The configuration object instantiated from this pretrained model. Examples: ```python >>> from transformers import GenerationConfig >>> # Download configuration from huggingface.co and cache. >>> generation_config = GenerationConfig.from_pretrained("gpt2") >>> # E.g. config was saved using *save_pretrained('./test/saved_model/')* >>> generation_config.save_pretrained("./test/saved_model/") >>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/") >>> # You can also specify configuration names to your generation configuration file >>> generation_config.save_pretrained("./test/saved_model/", config_file_name="my_configuration.json") >>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/", "my_configuration.json") >>> # If you'd like to try a minor variation to an existing configuration, you can also pass generation >>> # arguments to `.from_pretrained()`. Be mindful that typos and unused arguments will be ignored >>> generation_config, unused_kwargs = GenerationConfig.from_pretrained( ... "gpt2", top_k=1, foo=False, return_unused_kwargs=True ... ) >>> generation_config.top_k 1 >>> unused_kwargs {'foo': False} ```""" config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) use_auth_token = kwargs.pop("use_auth_token", None) subfolder = kwargs.pop("subfolder", "") from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) commit_hash = kwargs.pop("_commit_hash", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
) token = use_auth_token user_agent = {"file_type": "config", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline config_path = os.path.join(pretrained_model_name, config_file_name) config_path = str(config_path) is_local = os.path.exists(config_path) if os.path.isfile(os.path.join(subfolder, config_path)): # Special case when config_path is a local file resolved_config_file = config_path is_local = True elif is_remote_url(config_path): configuration_file = config_path resolved_config_file = download_url(config_path) else: configuration_file = config_file_name try: # Load from local folder or from cache or download from model Hub and cache resolved_config_file = cached_file( pretrained_model_name, configuration_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=commit_hash, ) commit_hash = extract_commit_hash(resolved_config_file, commit_hash) except EnvironmentError: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to # the original exception. raise except Exception: # For any other exception, we throw a generic error. raise EnvironmentError( f"Can't load the configuration of '{pretrained_model_name}'. If you were trying to load it" " from 'https://huggingface.co/models', make sure you don't have a local directory with the same" f" name. Otherwise, make sure '{pretrained_model_name}' is the correct path to a directory" f" containing a {configuration_file} file" ) try: # Load config dict config_dict = cls._dict_from_json_file(resolved_config_file) config_dict["_commit_hash"] = commit_hash except (json.JSONDecodeError, UnicodeDecodeError): raise EnvironmentError( f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file." ) if is_local: logger.info(f"loading configuration file {resolved_config_file}") else: logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}") return cls.from_dict(config_dict, **kwargs) @classmethod def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]): with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() return json.loads(text) @classmethod def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "GenerationConfig": """ Instantiates a [`GenerationConfig`] from a Python dictionary of parameters. Args: config_dict (`Dict[str, Any]`): Dictionary that will be used to instantiate the configuration object. kwargs (`Dict[str, Any]`): Additional parameters from which to initialize the configuration object. Returns: [`GenerationConfig`]: The configuration object instantiated from those parameters. """ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) # Those arguments may be passed along for our internal telemetry. # We remove them so they don't appear in `return_unused_kwargs`. kwargs.pop("_from_auto", None) kwargs.pop("_from_pipeline", None) # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update. if "_commit_hash" in kwargs and "_commit_hash" in config_dict: kwargs["_commit_hash"] = config_dict["_commit_hash"] # The line below allows model-specific config to be loaded as well through kwargs, with safety checks. 
# See https://github.com/huggingface/transformers/pull/21269 config = cls(**{**config_dict, **kwargs}) unused_kwargs = config.update(**kwargs) logger.info(f"Generate config {config}") if return_unused_kwargs: return config, unused_kwargs else: return config def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None: """ Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None, converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *"float32"* string, which can then be stored in the json format. """ if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str): d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1] for value in d.values(): if isinstance(value, dict): self.dict_torch_dtype_to_str(value) def to_diff_dict(self) -> Dict[str, Any]: """ Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, """ config_dict = self.to_dict() # get the default config dict default_config_dict = GenerationConfig().to_dict() serializable_config_dict = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if key not in default_config_dict or key == "transformers_version" or value != default_config_dict[key]: serializable_config_dict[key] = value self.dict_torch_dtype_to_str(serializable_config_dict) return serializable_config_dict def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. """ output = copy.deepcopy(self.__dict__) if "_commit_hash" in output: del output["_commit_hash"] # Transformers version when serializing this file output["transformers_version"] = __version__ self.dict_torch_dtype_to_str(output) return output def to_json_string(self, use_diff: bool = True) -> str: """ Serializes this instance to a JSON string. Args: use_diff (`bool`, *optional*, defaults to `True`): If set to `True`, only the difference between the config instance and the default `GenerationConfig()` is serialized to JSON string. Returns: `str`: String containing all the attributes that make up this configuration instance in JSON format. """ if use_diff is True: config_dict = self.to_diff_dict() else: config_dict = self.to_dict() return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True): """ Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this configuration instance's parameters will be saved. use_diff (`bool`, *optional*, defaults to `True`): If set to `True`, only the difference between the config instance and the default `GenerationConfig()` is serialized to JSON file. """ with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string(use_diff=use_diff)) @classmethod def from_model_config(cls, model_config: PretrainedConfig) -> "GenerationConfig": """ Instantiates a [`GenerationConfig`] from a [`PretrainedConfig`]. This function is useful to convert legacy [`PretrainedConfig`] objects, which may contain generation parameters, into a stand-alone [`GenerationConfig`]. 
Args: model_config (`PretrainedConfig`): The model config that will be used to instantiate the generation config. Returns: [`GenerationConfig`]: The configuration object instantiated from those parameters. """ config_dict = model_config.to_dict() config_dict.pop("_from_model_config", None) config = cls.from_dict(config_dict, return_unused_kwargs=False, _from_model_config=True) # Special case: some models have generation attributes set in the decoder. Use them if still unset in the # generation config. for decoder_name in ("decoder", "generator", "text_config"): if decoder_name in config_dict: default_generation_config = GenerationConfig() decoder_config = config_dict[decoder_name] for attr in config.to_dict().keys(): if attr in decoder_config and getattr(config, attr) == getattr(default_generation_config, attr): setattr(config, attr, decoder_config[attr]) return config def update(self, **kwargs): """ Updates attributes of this class instance with attributes from `kwargs` if they match existing atributtes, returning all the unused kwargs. Args: kwargs (`Dict[str, Any]`): Dictionary of attributes to tentatively update this class. Returns: `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance. """ to_remove = [] for key, value in kwargs.items(): if hasattr(self, key): setattr(self, key, value) to_remove.append(key) # remove all the attributes that were updated, without modifying the input dict unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove} return unused_kwargs
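# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal, hedged
# example of updating an existing `GenerationConfig` and serializing only the
# non-default values. The `_demo_generation_config_roundtrip` name and the
# parameter values are assumptions made purely for demonstration.
def _demo_generation_config_roundtrip():
    config = GenerationConfig(do_sample=True, top_k=10, temperature=0.7)
    # `update` only consumes kwargs that match existing attributes and returns the rest
    unused_kwargs = config.update(top_p=0.9, not_a_generation_flag=True)
    assert unused_kwargs == {"not_a_generation_flag": True}
    # With `use_diff=True`, only the attributes differing from the defaults are serialized
    return config.to_json_string(use_diff=True)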
file_length: 40,270
avg_line_length: 53.493911
max_line_length: 119
extension_type: py

repo: transformers
file: transformers-main/src/transformers/generation/streamers.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class BaseStreamer: """ Base class from which `.generate()` streamers should inherit. """ def put(self, value): """Function that is called by `.generate()` to push new tokens""" raise NotImplementedError() def end(self): """Function that is called by `.generate()` to signal the end of generation""" raise NotImplementedError() class TextStreamer(BaseStreamer): """ Simple text streamer that prints the token(s) to stdout as soon as entire words are formed. <Tip warning={true}> The API for the streamer classes is still under development and may change in the future. </Tip> Parameters: tokenizer (`AutoTokenizer`): The tokenized used to decode the tokens. skip_prompt (`bool`, *optional*, defaults to `False`): Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots. decode_kwargs (`dict`, *optional*): Additional keyword arguments to pass to the tokenizer's `decode` method. Examples: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer >>> tok = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt") >>> streamer = TextStreamer(tok) >>> # Despite returning the usual output, the streamer will also print the generated text to stdout. >>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20) An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven, ``` """ def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs): self.tokenizer = tokenizer self.skip_prompt = skip_prompt self.decode_kwargs = decode_kwargs # variables used in the streaming process self.token_cache = [] self.print_len = 0 self.next_tokens_are_prompt = True def put(self, value): """ Receives tokens, decodes them, and prints them to stdout as soon as they form entire words. """ if len(value.shape) > 1 and value.shape[0] > 1: raise ValueError("TextStreamer only supports batch size 1") elif len(value.shape) > 1: value = value[0] if self.skip_prompt and self.next_tokens_are_prompt: self.next_tokens_are_prompt = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist()) text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs) # After the symbol for a new line, we flush the cache. if text.endswith("\n"): printable_text = text[self.print_len :] self.token_cache = [] self.print_len = 0 # If the last token is a CJK character, we print the characters. 
elif len(text) > 0 and self._is_chinese_char(ord(text[-1])): printable_text = text[self.print_len :] self.print_len += len(printable_text) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: printable_text = text[self.print_len : text.rfind(" ") + 1] self.print_len += len(printable_text) self.on_finalized_text(printable_text) def end(self): """Flushes any remaining cache and prints a newline to stdout.""" # Flush the cache, if it exists if len(self.token_cache) > 0: text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs) printable_text = text[self.print_len :] self.token_cache = [] self.print_len = 0 else: printable_text = "" self.next_tokens_are_prompt = True self.on_finalized_text(printable_text, stream_end=True) def on_finalized_text(self, text: str, stream_end: bool = False): """Prints the new text to stdout. If the stream is ending, also prints a newline.""" print(text, flush=True, end="" if not stream_end else None) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False class TextIteratorStreamer(TextStreamer): """ Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator. This is useful for applications that benefit from acessing the generated text in a non-blocking way (e.g. in an interactive Gradio demo). <Tip warning={true}> The API for the streamer classes is still under development and may change in the future. </Tip> Parameters: tokenizer (`AutoTokenizer`): The tokenized used to decode the tokens. skip_prompt (`bool`, *optional*, defaults to `False`): Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots. timeout (`float`, *optional*): The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions in `.generate()`, when it is called in a separate thread. decode_kwargs (`dict`, *optional*): Additional keyword arguments to pass to the tokenizer's `decode` method. Examples: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer >>> from threading import Thread >>> tok = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt") >>> streamer = TextIteratorStreamer(tok) >>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way. 
>>> generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20) >>> thread = Thread(target=model.generate, kwargs=generation_kwargs) >>> thread.start() >>> generated_text = "" >>> for new_text in streamer: ... generated_text += new_text >>> generated_text 'An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,' ``` """ def __init__( self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs ): super().__init__(tokenizer, skip_prompt, **decode_kwargs) self.text_queue = Queue() self.stop_signal = None self.timeout = timeout def on_finalized_text(self, text: str, stream_end: bool = False): """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue.""" self.text_queue.put(text, timeout=self.timeout) if stream_end: self.text_queue.put(self.stop_signal, timeout=self.timeout) def __iter__(self): return self def __next__(self): value = self.text_queue.get(timeout=self.timeout) if value == self.stop_signal: raise StopIteration() else: return value
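# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal, hedged
# example of customizing the streaming behaviour by subclassing `TextStreamer`
# and overriding `on_finalized_text`. The `_DemoListStreamer` name is an
# assumption made purely for demonstration.
class _DemoListStreamer(TextStreamer):
    """Collects finalized text chunks in a list instead of printing them to stdout."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_chunks = []

    def on_finalized_text(self, text: str, stream_end: bool = False):
        # Store each finalized chunk; `.generate()` reaches this through `put()` and `end()`
        if text:
            self.text_chunks.append(text)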
file_length: 9,145
avg_line_length: 39.114035
max_line_length: 119
extension_type: py

repo: transformers
file: transformers-main/src/transformers/generation/utils.py
# coding=utf-8 # Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import warnings from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.distributed as dist from torch import nn from ..deepspeed import is_deepspeed_zero3_enabled from ..modeling_outputs import CausalLMOutputWithPast, Seq2SeqLMOutput from ..models.auto import ( MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, ) from ..utils import ModelOutput, logging from .beam_constraints import DisjunctiveConstraint, PhrasalConstraint from .beam_search import BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer from .configuration_utils import GenerationConfig from .logits_process import ( ClassifierFreeGuidanceLogitsProcessor, EncoderNoRepeatNGramLogitsProcessor, EncoderRepetitionPenaltyLogitsProcessor, EpsilonLogitsWarper, EtaLogitsWarper, ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, ForceTokensLogitsProcessor, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitNormalization, LogitsProcessorList, MinLengthLogitsProcessor, MinNewTokensLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, SequenceBiasLogitsProcessor, SuppressTokensAtBeginLogitsProcessor, SuppressTokensLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, TypicalLogitsWarper, ) from .stopping_criteria import ( MaxLengthCriteria, MaxTimeCriteria, StoppingCriteria, StoppingCriteriaList, validate_stopping_criteria, ) if TYPE_CHECKING: from ..modeling_utils import PreTrainedModel from .streamers import BaseStreamer logger = logging.get_logger(__name__) @dataclass class GreedySearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using greedy search. Args: sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. 
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class ContrastiveSearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using contrastive search. Args: sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. 
""" sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class ContrastiveSearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using contrastive search. Args: sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class GreedySearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class SampleDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using sampling. Args: sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`. attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(num_return_sequences*batch_size, generated_length, hidden_size)`. 
""" sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class SampleEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size*num_return_sequences, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_return_sequences, generated_length, hidden_size)`. 
""" sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class BeamSearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using beam search. Args: sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`. beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam indices of generated token id at each generation step. `torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`. attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None sequences_scores: Optional[torch.FloatTensor] = None scores: Optional[Tuple[torch.FloatTensor]] = None beam_indices: Optional[torch.LongTensor] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class BeamSearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam indices of generated token id at each generation step. `torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None sequences_scores: Optional[torch.FloatTensor] = None scores: Optional[Tuple[torch.FloatTensor]] = None beam_indices: Optional[torch.LongTensor] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class BeamSampleDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using beam sample. Args: sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. 
The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. sequences_scores (`torch.FloatTensor` of shape `(batch_size * num_return_sequence)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`. beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam indices of generated token id at each generation step. `torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`. attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None sequences_scores: Optional[torch.FloatTensor] = None scores: Optional[Tuple[torch.FloatTensor]] = None beam_indices: Optional[torch.LongTensor] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class BeamSampleEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`torch.LongTensor` of shape `(batch_size*num_beams, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. sequences_scores (`torch.FloatTensor` of shape `(batch_size * num_return_sequence)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. 
Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`). beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam indices of generated token id at each generation step. `torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size*num_beams, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None sequences_scores: Optional[torch.FloatTensor] = None scores: Optional[Tuple[torch.FloatTensor]] = None beam_indices: Optional[torch.LongTensor] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None GreedySearchOutput = Union[GreedySearchEncoderDecoderOutput, GreedySearchDecoderOnlyOutput] SampleOutput = Union[SampleEncoderDecoderOutput, SampleDecoderOnlyOutput] BeamSearchOutput = Union[BeamSearchEncoderDecoderOutput, BeamSearchDecoderOnlyOutput] BeamSampleOutput = Union[BeamSampleEncoderDecoderOutput, BeamSampleDecoderOnlyOutput] ContrastiveSearchOutput = Union[ContrastiveSearchEncoderDecoderOutput, ContrastiveSearchDecoderOnlyOutput] GenerateOutput = Union[GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput, ContrastiveSearchOutput] class GenerationMixin: """ A class containing all functions for auto-regressive text generation, to be used as a mixin in [`PreTrainedModel`]. 
The class exposes [`~generation.GenerationMixin.generate`], which can be used for: - *greedy decoding* by calling [`~generation.GenerationMixin.greedy_search`] if `num_beams=1` and `do_sample=False` - *contrastive search* by calling [`~generation.GenerationMixin.contrastive_search`] if `penalty_alpha>0` and `top_k>1` - *multinomial sampling* by calling [`~generation.GenerationMixin.sample`] if `num_beams=1` and `do_sample=True` - *beam-search decoding* by calling [`~generation.GenerationMixin.beam_search`] if `num_beams>1` and `do_sample=False` - *beam-search multinomial sampling* by calling [`~generation.GenerationMixin.beam_sample`] if `num_beams>1` and `do_sample=True` - *diverse beam-search decoding* by calling [`~generation.GenerationMixin.group_beam_search`], if `num_beams>1` and `num_beam_groups>1` - *constrained beam-search decoding* by calling [`~generation.GenerationMixin.constrained_beam_search`], if `constraints!=None` or `force_words_ids!=None` You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). """ def prepare_inputs_for_generation(self, *args, **kwargs): raise NotImplementedError( "A model class needs to define a `prepare_inputs_for_generation` method in order to use `.generate()`." ) def _prepare_model_inputs( self, inputs: Optional[torch.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[Dict[str, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]: """ This function extracts the model-specific `inputs` for generation. """ # 1. retrieve all kwargs that are non-None or non-model input related. # some encoder-decoder models have different names for model and encoder if ( self.config.is_encoder_decoder and hasattr(self, "encoder") and self.encoder.main_input_name != self.main_input_name ): input_name = self.encoder.main_input_name else: input_name = self.main_input_name model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name} # 2. check whether model_input_name is passed as kwarg # if yes and `inputs` is None use kwarg inputs inputs_kwarg = model_kwargs.pop(input_name, None) if inputs_kwarg is not None and inputs is not None: raise ValueError( f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed." f"Make sure to either pass {inputs} or {input_name}=..." ) elif inputs_kwarg is not None: inputs = inputs_kwarg # 3. In the presence of `inputs_embeds` for text models: # - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model # doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with # input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`) # - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and # pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states. if input_name == "input_ids" and "inputs_embeds" in model_kwargs: if not self.config.is_encoder_decoder: has_inputs_embeds_forwarding = "inputs_embeds" in set( inspect.signature(self.prepare_inputs_for_generation).parameters.keys() ) if not has_inputs_embeds_forwarding: raise ValueError( f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} " "doesn't have its forwarding implemented. 
See the GPT2 implementation for an example " "(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!" ) # In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of # the attention mask) can rely on the actual model input. model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation( inputs, bos_token_id, model_kwargs=model_kwargs ) else: if inputs is not None: raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.") inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds" # 4. if `inputs` is still None, try to create `input_ids` from BOS token inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs) return inputs, input_name, model_kwargs def adjust_logits_during_generation(self, logits: torch.FloatTensor, **kwargs) -> torch.FloatTensor: """ Implement in subclasses of [`PreTrainedModel`] for custom behavior to adjust the logits in the generate method. """ return logits def _maybe_initialize_input_ids_for_generation( self, inputs: Optional[torch.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[Dict[str, torch.Tensor]] = None, ) -> torch.LongTensor: """Initializes input ids for generation, if necessary.""" if inputs is not None: return inputs encoder_outputs = model_kwargs.get("encoder_outputs") if self.config.is_encoder_decoder and encoder_outputs is not None: # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding shape = encoder_outputs.last_hidden_state.size()[:-1] return torch.ones(shape, dtype=torch.long, device=self.device) * -100 if bos_token_id is None: raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with # soft-prompting or in multimodal implementations built on top of decoder-only language models. batch_size = 1 for value in model_kwargs.values(): if isinstance(value, torch.Tensor): batch_size = value.shape[0] break return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id def _prepare_attention_mask_for_generation( self, inputs: torch.Tensor, pad_token_id: Optional[int], eos_token_id: Optional[Union[int, List[int]]], ) -> torch.LongTensor: is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [torch.int, torch.long] is_pad_token_in_inputs = (pad_token_id is not None) and (pad_token_id in inputs) if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id not in eos_token_id) # Check if input is input_ids and padded -> only then is attention_mask defined if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: return inputs.ne(pad_token_id).long() else: return torch.ones(inputs.shape[:2], dtype=torch.long, device=inputs.device) def _prepare_encoder_decoder_kwargs_for_generation( self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str] = None ) -> Dict[str, Any]: # 1. get encoder encoder = self.get_encoder() # Compatibility with Accelerate big model inference: we need the encoder to outputs stuff on the same device # as the inputs. if hasattr(encoder, "_hf_hook"): encoder._hf_hook.io_same_device = True # 2. Prepare encoder args and encoder kwargs from model kwargs. 
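# Note on the filtering below: any kwarg whose name starts with `decoder_` or `cross_attn`,
# plus `use_cache`, is dropped from the encoder kwargs, and unless the encoder's forward()
# accepts a `**kwargs`/`model_kwargs` wildcard, only arguments that appear in its signature
# are forwarded to it.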
irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not any(argument.startswith(p) for p in irrelevant_prefix) } encoder_signature = set(inspect.signature(encoder.forward).parameters) encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature if not encoder_accepts_wildcard: encoder_kwargs = { argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature } # 3. make sure that encoder returns `ModelOutput` model_input_name = model_input_name if model_input_name is not None else self.main_input_name encoder_kwargs["return_dict"] = True encoder_kwargs[model_input_name] = inputs_tensor model_kwargs["encoder_outputs"]: ModelOutput = encoder(**encoder_kwargs) return model_kwargs def _prepare_decoder_input_ids_for_generation( self, batch_size: int, model_input_name: str, model_kwargs: Dict[str, torch.Tensor], decoder_start_token_id: int = None, bos_token_id: int = None, device: torch.device = None, ) -> Tuple[torch.LongTensor, Dict[str, torch.Tensor]]: """Prepares `decoder_input_ids` for generation with encoder-decoder models""" # 1. Check whether the user has defined `decoder_input_ids` manually. To facilitate in terms of input naming, # we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input. if model_kwargs is not None and "decoder_input_ids" in model_kwargs: decoder_input_ids = model_kwargs.pop("decoder_input_ids") elif "input_ids" in model_kwargs and model_input_name != "input_ids": decoder_input_ids = model_kwargs.pop("input_ids") else: decoder_input_ids = None # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that. 
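# Illustration with hypothetical ids: with decoder_start_token_id = 2, a user-provided
#   decoder_input_ids = [[42, 7, 19]]
# that does not already start with the start token becomes
#   decoder_input_ids = [[2, 42, 7, 19]]
# and any provided decoder_attention_mask is extended with a leading column of ones.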
decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) if device is None: device = self.device decoder_input_ids_start = torch.ones((batch_size, 1), dtype=torch.long, device=device) * decoder_start_token_id # no user input -> use decoder_start_token_id as decoder_input_ids if decoder_input_ids is None: decoder_input_ids = decoder_input_ids_start # exception: Donut checkpoints have task-specific decoder starts and don't expect a BOS token elif self.config.model_type == "vision-encoder-decoder" and "donut" in self.name_or_path.lower(): pass # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust # decoder_attention_mask if provided) elif (decoder_input_ids[:, 0] != decoder_start_token_id).all().item(): decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1) if "decoder_attention_mask" in model_kwargs: decoder_attention_mask = model_kwargs["decoder_attention_mask"] decoder_attention_mask = torch.cat( (torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask), dim=-1, ) model_kwargs["decoder_attention_mask"] = decoder_attention_mask return decoder_input_ids, model_kwargs def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.generation_config.decoder_start_token_id ) bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif bos_token_id is not None: return bos_token_id raise ValueError( "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." ) @staticmethod def _expand_inputs_for_generation( expand_size: int = 1, is_encoder_decoder: bool = False, input_ids: Optional[torch.LongTensor] = None, **model_kwargs, ) -> Tuple[torch.LongTensor, Dict[str, Any]]: """Expands tensors from [batch_size, ...] 
to [batch_size * expand_size, ...]""" def _expand_dict_for_generation(dict_to_expand): for key in dict_to_expand: if dict_to_expand[key] is not None and isinstance(dict_to_expand[key], torch.Tensor): dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0) return dict_to_expand if input_ids is not None: input_ids = input_ids.repeat_interleave(expand_size, dim=0) model_kwargs = _expand_dict_for_generation(model_kwargs) if is_encoder_decoder: if model_kwargs.get("encoder_outputs") is None: raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"]) return input_ids, model_kwargs def _extract_past_from_model_output(self, outputs: ModelOutput, standardize_cache_format: bool = False): past_key_values = None if "past_key_values" in outputs: past_key_values = outputs.past_key_values elif "mems" in outputs: past_key_values = outputs.mems elif "past_buckets_states" in outputs: past_key_values = outputs.past_buckets_states # Bloom fix: standardizes the cache format when requested if standardize_cache_format and hasattr(self, "_convert_to_standard_cache"): batch_size = outputs.logits.shape[0] past_key_values = self._convert_to_standard_cache(past_key_values, batch_size=batch_size) return past_key_values def _update_model_kwargs_for_generation( self, outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False, standardize_cache_format: bool = False, ) -> Dict[str, Any]: # update past_key_values model_kwargs["past_key_values"] = self._extract_past_from_model_output( outputs, standardize_cache_format=standardize_cache_format ) if getattr(outputs, "state", None) is not None: model_kwargs["state"] = outputs.state # update token_type_ids with last value if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) if not is_encoder_decoder: # update attention mask if "attention_mask" in model_kwargs: attention_mask = model_kwargs["attention_mask"] model_kwargs["attention_mask"] = torch.cat( [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 ) else: # update decoder attention mask if "decoder_attention_mask" in model_kwargs: decoder_attention_mask = model_kwargs["decoder_attention_mask"] model_kwargs["decoder_attention_mask"] = torch.cat( [decoder_attention_mask, decoder_attention_mask.new_ones((decoder_attention_mask.shape[0], 1))], dim=-1, ) return model_kwargs def _reorder_cache(self, past_key_values, beam_idx): raise NotImplementedError( f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to" f" enable beam search for {self.__class__}" ) def _get_logits_warper( self, generation_config: GenerationConfig, ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsWarper`] instances used for multinomial sampling. 
""" # instantiate warpers list warpers = LogitsProcessorList() # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if generation_config.temperature is not None and generation_config.temperature != 1.0: warpers.append(TemperatureLogitsWarper(generation_config.temperature)) min_tokens_to_keep = 2 if generation_config.num_beams > 1 else 1 if generation_config.top_k is not None and generation_config.top_k != 0: warpers.append(TopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep)) if generation_config.top_p is not None and generation_config.top_p < 1.0: warpers.append(TopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep)) if generation_config.typical_p is not None and generation_config.typical_p < 1.0: warpers.append( TypicalLogitsWarper(mass=generation_config.typical_p, min_tokens_to_keep=min_tokens_to_keep) ) if generation_config.epsilon_cutoff is not None and 0.0 < generation_config.epsilon_cutoff < 1.0: warpers.append( EpsilonLogitsWarper(epsilon=generation_config.epsilon_cutoff, min_tokens_to_keep=min_tokens_to_keep) ) if generation_config.eta_cutoff is not None and 0.0 < generation_config.eta_cutoff < 1.0: warpers.append( EtaLogitsWarper(epsilon=generation_config.eta_cutoff, min_tokens_to_keep=min_tokens_to_keep) ) # `LogitNormalization` should always be the last logit processor, when present if generation_config.renormalize_logits is True: warpers.append(LogitNormalization()) return warpers def _get_logits_processor( self, generation_config: GenerationConfig, input_ids_seq_length: int, encoder_input_ids: torch.LongTensor, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], logits_processor: Optional[LogitsProcessorList], ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsProcessor`] instances used to modify the scores of the language model head. 
""" # instantiate processors list processors = LogitsProcessorList() if generation_config.sequence_bias is not None: processors.append(SequenceBiasLogitsProcessor(sequence_bias=generation_config.sequence_bias)) if generation_config.diversity_penalty is not None and generation_config.diversity_penalty > 0.0: processors.append( HammingDiversityLogitsProcessor( diversity_penalty=generation_config.diversity_penalty, num_beams=generation_config.num_beams, num_beam_groups=generation_config.num_beam_groups, ) ) if ( generation_config.encoder_repetition_penalty is not None and generation_config.encoder_repetition_penalty != 1.0 ): processors.append( EncoderRepetitionPenaltyLogitsProcessor( penalty=generation_config.encoder_repetition_penalty, encoder_input_ids=encoder_input_ids ) ) if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0: processors.append(RepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty)) if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0: processors.append(NoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size)) if ( generation_config.encoder_no_repeat_ngram_size is not None and generation_config.encoder_no_repeat_ngram_size > 0 ): if self.config.is_encoder_decoder: processors.append( EncoderNoRepeatNGramLogitsProcessor( generation_config.encoder_no_repeat_ngram_size, encoder_input_ids ) ) else: raise ValueError( "It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture" ) if generation_config.bad_words_ids is not None: processors.append( NoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id) ) if ( generation_config.min_length is not None and generation_config.eos_token_id is not None and generation_config.min_length > 0 ): processors.append(MinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)) if ( generation_config.min_new_tokens is not None and generation_config.eos_token_id is not None and generation_config.min_new_tokens > 0 ): processors.append( MinNewTokensLengthLogitsProcessor( input_ids_seq_length, generation_config.min_new_tokens, generation_config.eos_token_id ) ) if prefix_allowed_tokens_fn is not None: processors.append( PrefixConstrainedLogitsProcessor( prefix_allowed_tokens_fn, generation_config.num_beams // generation_config.num_beam_groups ) ) if generation_config.forced_bos_token_id is not None: processors.append(ForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)) if generation_config.forced_eos_token_id is not None: processors.append( ForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id) ) if generation_config.remove_invalid_values is True: processors.append(InfNanRemoveLogitsProcessor()) if generation_config.exponential_decay_length_penalty is not None: processors.append( ExponentialDecayLengthPenalty( generation_config.exponential_decay_length_penalty, generation_config.eos_token_id, input_ids_seq_length, ) ) if generation_config.suppress_tokens is not None: processors.append(SuppressTokensLogitsProcessor(generation_config.suppress_tokens)) if generation_config.begin_suppress_tokens is not None: begin_index = input_ids_seq_length begin_index = ( begin_index if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None) else begin_index + 1 ) if generation_config.forced_decoder_ids is not None: # generation starts after the last token that is forced 
begin_index += generation_config.forced_decoder_ids[-1][0] processors.append( SuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index) ) if generation_config.forced_decoder_ids is not None: processors.append(ForceTokensLogitsProcessor(generation_config.forced_decoder_ids)) if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1: processors.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale)) processors = self._merge_criteria_processor_list(processors, logits_processor) # `LogitNormalization` should always be the last logit processor, when present if generation_config.renormalize_logits is True: processors.append(LogitNormalization()) return processors def _get_stopping_criteria( self, generation_config: GenerationConfig, stopping_criteria: Optional[StoppingCriteriaList] ) -> StoppingCriteriaList: criteria = StoppingCriteriaList() if generation_config.max_length is not None: max_position_embeddings = getattr(self.config, "max_position_embeddings", None) criteria.append( MaxLengthCriteria( max_length=generation_config.max_length, max_position_embeddings=max_position_embeddings, ) ) if generation_config.max_time is not None: criteria.append(MaxTimeCriteria(max_time=generation_config.max_time)) criteria = self._merge_criteria_processor_list(criteria, stopping_criteria) return criteria def _merge_criteria_processor_list( self, default_list: Union[LogitsProcessorList, StoppingCriteriaList], custom_list: Union[LogitsProcessorList, StoppingCriteriaList], ) -> Union[LogitsProcessorList, StoppingCriteriaList]: if len(custom_list) == 0: return default_list for default in default_list: for custom in custom_list: if type(custom) is type(default): object_type = "stopping criteria" if isinstance(custom, StoppingCriteria) else "logits processor" raise ValueError( f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to" f" `.generate()`, but it has already been created with the values {default}. {default} has been" " created by passing the corresponding arguments to generate or by the model's config default" f" values. If you just want to change the default values of {object_type} consider passing" f" them as arguments to `.generate()` instead of using a custom {object_type}." ) default_list.extend(custom_list) return default_list def compute_transition_scores( self, sequences: torch.Tensor, scores: Tuple[torch.Tensor], beam_indices: Optional[torch.Tensor] = None, normalize_logits: bool = False, ) -> torch.Tensor: """ Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was used). This is a convenient method to quicky obtain the scores of the selected tokens at generation time. Parameters: sequences (`torch.LongTensor`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(torch.FloatTensor)`): Transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. beam_indices (`torch.LongTensor`, *optional*): Beam indices of generated token id at each generation step. 
`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`. Only required if a `num_beams>1` at generate-time. normalize_logits (`bool`, *optional*, defaults to `False`): Whether to normalize the logits (which, for legacy reasons, may be unnormalized). Return: `torch.Tensor`: A `torch.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing the transition scores (logits) Examples: ```python >>> from transformers import GPT2Tokenizer, AutoModelForCausalLM >>> import numpy as np >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> tokenizer.pad_token_id = tokenizer.eos_token_id >>> inputs = tokenizer(["Today is"], return_tensors="pt") >>> # Example 1: Print the scores for each token generated with Greedy Search >>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True) >>> transition_scores = model.compute_transition_scores( ... outputs.sequences, outputs.scores, normalize_logits=True ... ) >>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for >>> # encoder-decoder models, like BART or T5. >>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1] >>> generated_tokens = outputs.sequences[:, input_length:] >>> for tok, score in zip(generated_tokens[0], transition_scores[0]): ... # | token | token string | logits | probability ... print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}") | 262 | the | -1.414 | 24.33% | 1110 | day | -2.609 | 7.36% | 618 | when | -2.010 | 13.40% | 356 | we | -1.859 | 15.58% | 460 | can | -2.508 | 8.14% >>> # Example 2: Reconstruct the sequence scores from Beam Search >>> outputs = model.generate( ... **inputs, ... max_new_tokens=5, ... num_beams=4, ... num_return_sequences=4, ... return_dict_in_generate=True, ... output_scores=True, ... ) >>> transition_scores = model.compute_transition_scores( ... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False ... ) >>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores. >>> # Tip: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the >>> # use case, you might want to recompute it with `normalize_logits=True`. >>> output_length = input_length + np.sum(transition_scores.numpy() < 0, axis=1) >>> length_penalty = model.generation_config.length_penalty >>> reconstructed_scores = transition_scores.sum(axis=1) / (output_length**length_penalty) >>> print(np.allclose(outputs.sequences_scores, reconstructed_scores)) True ```""" # 1. In absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent # to a beam search approach were the first (and only) beam is always selected if beam_indices is None: beam_indices = torch.arange(scores[0].shape[0]).view(-1, 1).to(sequences.device) beam_indices = beam_indices.expand(-1, len(scores)) # 2. reshape scores as [batch_size*vocab_size, # generation steps] with # generation steps being # seq_len - input_length scores = torch.stack(scores).reshape(len(scores), -1).transpose(0, 1) # 3. Optionally normalize the logits (across the vocab dimension) if normalize_logits: scores = scores.reshape(-1, self.config.vocab_size, scores.shape[-1]) scores = torch.nn.functional.log_softmax(scores, dim=1) scores = scores.reshape(-1, scores.shape[-1]) # 4. 
cut beam_indices to longest beam length beam_indices_mask = beam_indices < 0 max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max() beam_indices = beam_indices.clone()[:, :max_beam_length] beam_indices_mask = beam_indices_mask[:, :max_beam_length] # 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards beam_indices[beam_indices_mask] = 0 # 6. multiply beam_indices with vocab size to gather correctly from scores beam_sequence_indices = beam_indices * self.config.vocab_size # 7. Define which indices contributed to scores cut_idx = sequences.shape[-1] - max_beam_length indices = sequences[:, cut_idx:] + beam_sequence_indices # 8. Compute scores transition_scores = scores.gather(0, indices) # 9. Mask out transition_scores of beams that stopped early transition_scores[beam_indices_mask] = 0 return transition_scores def _validate_model_class(self): """ Confirms that the model class is compatible with generation. If not, raises an exception that points to the right class to use. """ if not self.can_generate(): generate_compatible_mappings = [ MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, ] generate_compatible_classes = set() for model_mapping in generate_compatible_mappings: supported_models = model_mapping.get(type(self.config), default=None) if supported_models is not None: generate_compatible_classes.add(supported_models.__name__) exception_message = ( f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " "it doesn't have a language model head." ) if generate_compatible_classes: exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" raise TypeError(exception_message) def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): """Validates model kwargs for generation. Generate argument typos will also be caught here.""" # Excludes arguments that are handled before calling any model function if self.config.is_encoder_decoder: for key in ["decoder_input_ids"]: model_kwargs.pop(key, None) unused_model_args = [] model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;) if "kwargs" in model_args or "model_kwargs" in model_args: model_args |= set(inspect.signature(self.forward).parameters) for key, value in model_kwargs.items(): if value is not None and key not in model_args: unused_model_args.append(key) if unused_model_args: raise ValueError( f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" " generate arguments will also show up in this list)" ) @torch.no_grad() def generate( self, inputs: Optional[torch.Tensor] = None, generation_config: Optional[GenerationConfig] = None, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, synced_gpus: Optional[bool] = None, assistant_model: Optional["PreTrainedModel"] = None, streamer: Optional["BaseStreamer"] = None, **kwargs, ) -> Union[GenerateOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head. 
<Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](../generation_strategies). </Tip> Parameters: inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which has the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logits processor is passed that is already created with the arguments or a generation config, an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a generation config. If a stopping criterion is passed that is already created with the arguments or a generation config, an error is thrown. This feature is intended for advanced users. prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*): If provided, this function constrains beam search to allowed tokens only at each step. If not provided, no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and `input_ids`. It has to return a list with the allowed tokens for the next generation step, conditioned on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful for constrained generation conditioned on the prefix, as described in [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904). synced_gpus (`bool`, *optional*): Whether to continue running the while loop until max_length. Unless overridden, this flag will be set to `True` in a DeepSpeed ZeRO Stage 3 multi-GPU environment to avoid hanging if one GPU finishes generating before the others. Otherwise it'll be set to `False`. assistant_model (`PreTrainedModel`, *optional*): An assistant model that can be used to accelerate generation. The assistant model must have the exact same tokenizer. The acceleration is achieved when forecasting candidate tokens with the assistant model is much faster than running generation with the model you're calling generate from. As such, the assistant model should be much smaller. 
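As an aside on the `prefix_allowed_tokens_fn` contract described above, here is a hedged usage sketch; the checkpoint, prompt, and allowed phrase are purely illustrative and not part of this file:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Allow only these token ids at every generation step (illustrative set).
allowed_ids = tokenizer(" yes no maybe").input_ids

def allowed_tokens(batch_id, input_ids):
    # Receives the batch index and the tokens generated so far, and must return
    # the list of token ids permitted for the next step.
    return allowed_ids

inputs = tokenizer("The answer is", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=3, prefix_allowed_tokens_fn=allowed_tokens)
print(tokenizer.decode(out[0]))
```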
streamer (`BaseStreamer`, *optional*): Streamer object that will be used to stream the generated sequences. Generated tokens are passed through `streamer.put(token_ids)` and the streamer is responsible for any further processing. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GreedySearchDecoderOnlyOutput`], - [`~generation.SampleDecoderOnlyOutput`], - [`~generation.BeamSearchDecoderOnlyOutput`], - [`~generation.BeamSampleDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GreedySearchEncoderDecoderOutput`], - [`~generation.SampleEncoderDecoderOutput`], - [`~generation.BeamSearchEncoderDecoderOutput`], - [`~generation.BeamSampleEncoderDecoderOutput`] """ if synced_gpus is None: if is_deepspeed_zero3_enabled() and dist.get_world_size() > 1: synced_gpus = True else: synced_gpus = False # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call self._validate_model_class() # priority: `generation_config` argument > `model.generation_config` (the default generation config) if generation_config is None: # legacy: users may modify the model configuration to control generation -- update the generation config # model attribute accordingly, if it was created from the model config if self.generation_config._from_model_config: new_generation_config = GenerationConfig.from_model_config(self.config) if new_generation_config != self.generation_config: warnings.warn( "You have modified the pretrained model configuration to control generation. This is a" " deprecated strategy to control generation and will be removed soon, in a future version." " Please use a generation configuration file (see" " https://huggingface.co/docs/transformers/main_classes/text_generation )" ) self.generation_config = new_generation_config generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs generation_config.validate() self._validate_model_kwargs(model_kwargs.copy()) # 2. Set generation parameters if not already defined logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if generation_config.pad_token_id is None and generation_config.eos_token_id is not None: if model_kwargs.get("attention_mask", None) is None: logger.warning( "The attention mask and the pad token id were not set. As a consequence, you may observe " "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." 
) eos_token_id = generation_config.eos_token_id if isinstance(eos_token_id, list): eos_token_id = eos_token_id[0] logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") generation_config.pad_token_id = eos_token_id # 3. Define model inputs # inputs_tensor has to be defined # model_input_name is defined if model-specific keyword input is passed # otherwise model_input_name is None # all model-specific keyword inputs are removed from `model_kwargs` inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs( inputs, generation_config.bos_token_id, model_kwargs ) batch_size = inputs_tensor.shape[0] # 4. Define other model kwargs model_kwargs["output_attentions"] = generation_config.output_attentions model_kwargs["output_hidden_states"] = generation_config.output_hidden_states # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are # generating the first new token or not, and we only want to use the embeddings for the first new token) if not self.config.is_encoder_decoder and model_input_name == "inputs_embeds": model_kwargs["use_cache"] = True else: model_kwargs["use_cache"] = generation_config.use_cache accepts_attention_mask = "attention_mask" in set(inspect.signature(self.forward).parameters.keys()) requires_attention_mask = "encoder_outputs" not in model_kwargs if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask: model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id ) # decoder-only models should use left-padding for generation if not self.config.is_encoder_decoder: # If `input_ids` was given, check if the last id in any sequence is `pad_token_id` # Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off. if ( generation_config.pad_token_id is not None and len(inputs_tensor.shape) == 2 and torch.sum(inputs_tensor[:, -1] == generation_config.pad_token_id) > 0 ): logger.warning( "A decoder-only architecture is being used, but right-padding was detected! For correct " "generation results, please set `padding_side='left'` when initializing the tokenizer." ) if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs: # if model is encoder decoder encoder_outputs are created # and added to `model_kwargs` model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( inputs_tensor, model_kwargs, model_input_name ) # 5. Prepare `input_ids` which will be used for auto-regressive generation if self.config.is_encoder_decoder: input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation( batch_size=batch_size, model_input_name=model_input_name, model_kwargs=model_kwargs, decoder_start_token_id=generation_config.decoder_start_token_id, bos_token_id=generation_config.bos_token_id, device=inputs_tensor.device, ) else: input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids") if streamer is not None: streamer.put(input_ids.cpu()) # 6. Prepare `max_length` depending on other stopping criteria. input_ids_seq_length = input_ids.shape[-1] has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None if has_default_max_length and generation_config.max_new_tokens is None: warnings.warn( f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. 
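Because the right-padding check above only warns, it may help to show what correct batched usage for a decoder-only model looks like. A hedged sketch, with "gpt2" used purely as an example checkpoint:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left")
tokenizer.pad_token = tokenizer.eos_token  # gpt2 has no pad token out of the box
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Left padding keeps the last position of every row a real token, which is what
# decoder-only generation continues from; the attention mask marks the padding.
batch = tokenizer(["Today is", "The weather in Paris is"], return_tensors="pt", padding=True)
out = model.generate(**batch, max_new_tokens=5, pad_token_id=tokenizer.eos_token_id)
print(tokenizer.batch_decode(out, skip_special_tokens=True))
```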
" "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we" " recommend using `max_new_tokens` to control the maximum length of the generation.", UserWarning, ) elif generation_config.max_new_tokens is not None: if not has_default_max_length: logger.warning( f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " "Please refer to the documentation for more information. " "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" ) generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length: raise ValueError( f"Unfeasible length constraints: the minimum length ({generation_config.min_length}) is larger than" f" the maximum length ({generation_config.max_length})" ) if input_ids_seq_length >= generation_config.max_length: input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" logger.warning( f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" " increasing `max_new_tokens`." ) # 7. determine generation mode is_constraint_gen_mode = ( generation_config.constraints is not None or generation_config.force_words_ids is not None ) is_contrastive_search_gen_mode = ( (generation_config.num_beams == 1) and generation_config.top_k is not None and generation_config.top_k > 1 and generation_config.do_sample is False and generation_config.penalty_alpha is not None and generation_config.penalty_alpha > 0 ) is_greedy_gen_mode = ( (generation_config.num_beams == 1) and (generation_config.num_beam_groups == 1) and generation_config.do_sample is False and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_sample_gen_mode = ( (generation_config.num_beams == 1) and (generation_config.num_beam_groups == 1) and generation_config.do_sample is True and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_beam_gen_mode = ( (generation_config.num_beams > 1) and (generation_config.num_beam_groups == 1) and generation_config.do_sample is False and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_beam_sample_gen_mode = ( (generation_config.num_beams > 1) and (generation_config.num_beam_groups == 1) and generation_config.do_sample is True and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_group_beam_gen_mode = ( (generation_config.num_beams > 1) and (generation_config.num_beam_groups > 1) and not is_constraint_gen_mode and not is_contrastive_search_gen_mode ) is_assisted_gen_mode = False if assistant_model is not None: if not (is_greedy_gen_mode or is_sample_gen_mode): raise ValueError( "You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate " "is only supported with Greedy Search and Sample." ) is_assisted_gen_mode = True if generation_config.num_beam_groups > generation_config.num_beams: raise ValueError("`num_beam_groups` has to be smaller or equal to `num_beams`") if is_group_beam_gen_mode and generation_config.do_sample is True: raise ValueError( "Diverse beam search cannot be used in sampling mode. Make sure that `do_sample` is set to `False`." 
) if streamer is not None and (generation_config.num_beams > 1): raise ValueError( "`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1." ) if self.device.type != input_ids.device.type: warnings.warn( "You are calling .generate() with the `input_ids` being on a device type different" f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model" f" is on {self.device.type}. You may experience unexpected behaviors or slower generation." " Please make sure that you have put `input_ids` to the" f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before" " running `.generate()`.", UserWarning, ) # 8. prepare distribution pre_processing samplers logits_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=input_ids_seq_length, encoder_input_ids=inputs_tensor, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, logits_processor=logits_processor, ) # 9. prepare stopping criteria stopping_criteria = self._get_stopping_criteria( generation_config=generation_config, stopping_criteria=stopping_criteria ) # 10. go into different generation modes if is_assisted_gen_mode: if generation_config.num_return_sequences > 1: raise ValueError( "num_return_sequences has to be 1 when doing assisted generate, " f"but is {generation_config.num_return_sequences}." ) if batch_size > 1: raise ValueError("assisted generate is only supported for batch_size = 1") if not model_kwargs["use_cache"]: raise ValueError("assisted generate requires `use_cache=True`") # 11. If the assistant model is an encoder-decoder, prepare its encoder outputs if assistant_model.config.is_encoder_decoder: assistant_model_kwargs = copy.deepcopy(model_kwargs) inputs_tensor, model_input_name, assistant_model_kwargs = assistant_model._prepare_model_inputs( inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_model_kwargs ) assistant_model_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation( inputs_tensor, assistant_model_kwargs, model_input_name ) model_kwargs["assistant_encoder_outputs"] = assistant_model_kwargs["encoder_outputs"] # 12. run assisted generate return self.assisted_decoding( input_ids, assistant_model=assistant_model, do_sample=generation_config.do_sample, logits_processor=logits_processor, logits_warper=self._get_logits_warper(generation_config) if generation_config.do_sample else None, stopping_criteria=stopping_criteria, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, synced_gpus=synced_gpus, streamer=streamer, **model_kwargs, ) if is_greedy_gen_mode: if generation_config.num_return_sequences > 1: raise ValueError( "num_return_sequences has to be 1 when doing greedy search, " f"but is {generation_config.num_return_sequences}." ) # 11. 
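A hedged usage sketch of the assisted path dispatched above; the checkpoints are illustrative (any pair sharing the same tokenizer works, with the assistant much smaller than the main model):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2-large")
assistant = AutoModelForCausalLM.from_pretrained("gpt2")  # same tokenizer, much smaller

inputs = tokenizer("Assisted generation lets a small model", return_tensors="pt")
# batch_size must be 1, num_return_sequences must be 1, and use_cache must stay True,
# as enforced by the checks above.
out = model.generate(**inputs, assistant_model=assistant, max_new_tokens=20)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```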
run greedy search return self.greedy_search( input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, synced_gpus=synced_gpus, streamer=streamer, **model_kwargs, ) elif is_contrastive_search_gen_mode: if generation_config.num_return_sequences > 1: raise ValueError( "num_return_sequences has to be 1 when doing contrastive search, " f"but is {generation_config.num_return_sequences}." ) if not model_kwargs["use_cache"]: raise ValueError("Contrastive search requires `use_cache=True`") return self.contrastive_search( input_ids, top_k=generation_config.top_k, penalty_alpha=generation_config.penalty_alpha, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, synced_gpus=synced_gpus, streamer=streamer, **model_kwargs, ) elif is_sample_gen_mode: # 11. prepare logits warper logits_warper = self._get_logits_warper(generation_config) # 12. expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 13. run sample return self.sample( input_ids, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, synced_gpus=synced_gpus, streamer=streamer, **model_kwargs, ) elif is_beam_gen_mode: if generation_config.num_return_sequences > generation_config.num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 11. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=generation_config.num_beams, device=inputs_tensor.device, length_penalty=generation_config.length_penalty, do_early_stopping=generation_config.early_stopping, num_beam_hyps_to_keep=generation_config.num_return_sequences, max_length=generation_config.max_length, ) # 12. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 13. run beam search return self.beam_search( input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_beam_sample_gen_mode: # 11. prepare logits warper logits_warper = self._get_logits_warper(generation_config) if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") # 12. 
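Which of these branches runs is decided entirely by the mode flags computed in step 7. As a readable restatement, here is a hedged, simplified sketch of that decision table; it ignores constrained and assisted generation and is not the code `generate` actually executes:

```python
def infer_generation_mode(num_beams=1, num_beam_groups=1, do_sample=False, top_k=None, penalty_alpha=None):
    """Simplified restatement of the mode flags above (no constraints / assistance)."""
    if num_beams == 1 and not do_sample and top_k is not None and top_k > 1 and (penalty_alpha or 0) > 0:
        return "contrastive_search"
    if num_beams == 1 and num_beam_groups == 1:
        return "sample" if do_sample else "greedy_search"
    if num_beams > 1 and num_beam_groups > 1:
        return "group_beam_search"
    if num_beams > 1:
        return "beam_sample" if do_sample else "beam_search"
    raise ValueError("unsupported combination")


assert infer_generation_mode() == "greedy_search"
assert infer_generation_mode(do_sample=True) == "sample"
assert infer_generation_mode(num_beams=4) == "beam_search"
assert infer_generation_mode(num_beams=4, do_sample=True) == "beam_sample"
assert infer_generation_mode(num_beams=4, num_beam_groups=2) == "group_beam_search"
assert infer_generation_mode(top_k=4, penalty_alpha=0.6) == "contrastive_search"
```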
prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size * generation_config.num_return_sequences, num_beams=generation_config.num_beams, device=inputs_tensor.device, length_penalty=generation_config.length_penalty, do_early_stopping=generation_config.early_stopping, max_length=generation_config.max_length, ) # 13. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_beams * generation_config.num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 14. run beam sample return self.beam_sample( input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_group_beam_gen_mode: if generation_config.num_return_sequences > generation_config.num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if generation_config.num_beams % generation_config.num_beam_groups != 0: raise ValueError("`num_beams` should be divisible by `num_beam_groups` for group beam search.") if generation_config.diversity_penalty == 0.0: raise ValueError( "`diversity_penalty` should be greater than `0.0`, otherwise your beam groups will be identical." ) if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") has_default_typical_p = kwargs.get("typical_p") is None and generation_config.typical_p == 1.0 if not has_default_typical_p: raise ValueError("Decoder argument `typical_p` is not supported with beam groups.") # 11. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=generation_config.num_beams, device=inputs_tensor.device, length_penalty=generation_config.length_penalty, do_early_stopping=generation_config.early_stopping, num_beam_hyps_to_keep=generation_config.num_return_sequences, num_beam_groups=generation_config.num_beam_groups, max_length=generation_config.max_length, ) # 12. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 13. 
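For the group (diverse) beam search branch set up above and run just below, a hedged end-to-end sketch; checkpoint and prompt are illustrative, and the chosen values simply satisfy the checks above (`num_beams` divisible by `num_beam_groups`, `diversity_penalty > 0.0`, no sampling):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer(["The city of Paris"], return_tensors="pt")
out = model.generate(
    **inputs,
    max_new_tokens=10,
    num_beams=6,
    num_beam_groups=3,      # num_beams must be divisible by num_beam_groups
    diversity_penalty=1.0,  # must be > 0.0, otherwise the groups collapse into each other
    num_return_sequences=3,
    do_sample=False,        # sampling is rejected for group beam search
)
print(tokenizer.batch_decode(out, skip_special_tokens=True))
```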
run beam search return self.group_beam_search( input_ids, beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) elif is_constraint_gen_mode: if generation_config.num_return_sequences > generation_config.num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") if generation_config.num_beams <= 1: raise ValueError("`num_beams` needs to be greater than 1 for constrained generation.") if generation_config.do_sample: raise ValueError("`do_sample` needs to be false for constrained generation.") if generation_config.num_beam_groups is not None and generation_config.num_beam_groups > 1: raise ValueError("`num_beam_groups` not supported yet for constrained generation.") final_constraints = [] if generation_config.constraints is not None: final_constraints = generation_config.constraints if generation_config.force_words_ids is not None: def typeerror(): raise ValueError( "`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]`" f"of positive integers, but is {generation_config.force_words_ids}." ) if ( not isinstance(generation_config.force_words_ids, list) or len(generation_config.force_words_ids) == 0 ): typeerror() for word_ids in generation_config.force_words_ids: if isinstance(word_ids[0], list): if not isinstance(word_ids, list) or len(word_ids) == 0: typeerror() if any(not isinstance(token_ids, list) for token_ids in word_ids): typeerror() if any( any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids) for token_ids in word_ids ): typeerror() constraint = DisjunctiveConstraint(word_ids) else: if not isinstance(word_ids, list) or len(word_ids) == 0: typeerror() if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids): typeerror() constraint = PhrasalConstraint(word_ids) final_constraints.append(constraint) # 11. prepare beam search scorer constrained_beam_scorer = ConstrainedBeamSearchScorer( constraints=final_constraints, batch_size=batch_size, num_beams=generation_config.num_beams, device=inputs_tensor.device, length_penalty=generation_config.length_penalty, do_early_stopping=generation_config.early_stopping, num_beam_hyps_to_keep=generation_config.num_return_sequences, max_length=generation_config.max_length, ) # 12. interleave input_ids with `num_beams` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 13. 
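The `force_words_ids` validation above accepts two shapes: a flat `List[List[int]]` (each inner list a phrase that must appear, handled as a `PhrasalConstraint`) or a nested `List[List[List[int]]]` (alternative phrasings, handled as a `DisjunctiveConstraint`). A hedged sketch of both forms driving constrained beam search through `generate`; checkpoint, prompt, and phrases are illustrative:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("The weather report says", return_tensors="pt")

# PhrasalConstraint form: this exact phrase must appear in the output.
must_appear = tokenizer([" sunny skies"], add_special_tokens=False).input_ids

# DisjunctiveConstraint form: at least one of these alternatives must appear.
one_of = [tokenizer([" rain", " snow"], add_special_tokens=False).input_ids]

out = model.generate(
    **inputs,
    force_words_ids=must_appear + one_of,
    num_beams=4,        # constrained generation requires num_beams > 1
    do_sample=False,    # and deterministic scoring within the beams
    max_new_tokens=15,
)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```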
run beam search return self.constrained_beam_search( input_ids, constrained_beam_scorer=constrained_beam_scorer, logits_processor=logits_processor, stopping_criteria=stopping_criteria, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, synced_gpus=synced_gpus, **model_kwargs, ) @torch.no_grad() def contrastive_search( self, input_ids: torch.LongTensor, top_k: Optional[int] = 1, penalty_alpha: Optional[float] = 0, logits_processor: Optional[LogitsProcessorList] = None, logits_warper: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: bool = False, streamer: Optional["BaseStreamer"] = None, **model_kwargs, ) -> Union[ContrastiveSearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **contrastive search** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. <Tip warning={true}> In most cases, you do not need to call [`~generation.GenerationMixin.contrastive_search`] directly. Use generate() instead. For an overview of generation strategies and code examples, check the [following guide](../generation_strategies). </Tip> Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. top_k (`int`, *optional*, defaults to 1): The size of the candidate set that is used to re-rank for contrastive search penalty_alpha (`float`, *optional*, defaults to 0): The degeneration penalty for contrastive search; activate when it is larger than 0 logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. 
return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) streamer (`BaseStreamer`, *optional*): Streamer object that will be used to stream the generated sequences. Generated tokens are passed through `streamer.put(token_ids)` and the streamer is responsible for any further processing. model_kwargs: Additional model specific keyword arguments will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation.ContrastiveSearchDecoderOnlyOutput`], [`~generation.ContrastiveSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation.ContrastiveSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.ContrastiveSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... StoppingCriteriaList, ... MaxLengthCriteria, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") >>> # set pad_token_id to eos_token_id because OPT does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "DeepMind Company is" >>> input_ids = tokenizer(input_prompt, return_tensors="pt") >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=64)]) >>> outputs = model.contrastive_search( ... **input_ids, penalty_alpha=0.6, top_k=4, stopping_criteria=stopping_criteria ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). 
DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = torch.ones(input_ids.shape[0], dtype=torch.long, device=input_ids.device) this_peer_finished = False # used by synced_gpus only batch_size = input_ids.shape[0] while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? 
the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # if the first step in the loop, encode all the prefix and obtain: (1) past_key_values; # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step if model_kwargs.get("past_key_values") is None: # prepare inputs model_kwargs["use_cache"] = True model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save # the `encoder_outputs` outputs = self( **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with # previous tokens) if self.config.is_encoder_decoder: last_hidden_states = outputs.decoder_hidden_states[-1] else: last_hidden_states = outputs.hidden_states[-1] # next logit for contrastive search to select top-k candidate tokens logit_for_next_step = outputs.logits[:, -1, :] model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder, standardize_cache_format=True, ) # Expands model inputs top_k times, for batched forward passes (akin to beam search). _, model_kwargs = self._expand_inputs_for_generation( expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) past_key_values = model_kwargs.get("past_key_values") if past_key_values is None: raise ValueError( f"{self.__class__.__name__} does not support caching and therefore **can't** be used " "for contrastive search." ) elif ( not isinstance(past_key_values[0], (tuple, torch.Tensor)) or past_key_values[0][0].shape[0] != batch_size ): raise ValueError( f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be " "used for contrastive search without further modifications." 
) # contrastive_search main logic start: # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by # degeneration penalty logit_for_next_step = logits_processor(input_ids, logit_for_next_step) logit_for_next_step = logits_warper(input_ids, logit_for_next_step) next_probs = nn.functional.softmax(logit_for_next_step, dim=-1) top_k_probs, top_k_ids = torch.topk(next_probs, dim=-1, k=top_k) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (logit_for_next_step,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # Replicates the new past_key_values to match the `top_k` candidates new_key_values = [] for layer in model_kwargs["past_key_values"]: items = [] # item is either the key or the value matrix for item in layer: items.append(item.repeat_interleave(top_k, dim=0)) new_key_values.append(items) model_kwargs["past_key_values"] = new_key_values # compute the candidate tokens by the language model and collects their hidden_states next_model_inputs = self.prepare_inputs_for_generation(top_k_ids.view(-1, 1), **model_kwargs) outputs = self( **next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) next_past_key_values = self._extract_past_from_model_output(outputs, standardize_cache_format=True) logits = outputs.logits[:, -1, :] # name is different for encoder-decoder and decoder-only models if self.config.is_encoder_decoder: next_hidden = outputs.decoder_hidden_states[-1] full_hidden_states = outputs.decoder_hidden_states else: next_hidden = outputs.hidden_states[-1] full_hidden_states = outputs.hidden_states context_hidden = last_hidden_states.repeat_interleave(top_k, dim=0) # compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the # model confidence. Keeping `selected_idx` on CPU enables multi-device contrastive search and doesn't # introduce (noticeable) slowdowns on single-device runs. selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k) selected_idx = selected_idx.to("cpu") # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores # (model confidence minus degeneration penalty); (6) decoder hidden_states next_tokens = top_k_ids[range(len(top_k_ids)), selected_idx] next_hidden = torch.stack(torch.split(next_hidden.squeeze(dim=1), top_k)) next_hidden = next_hidden[range(batch_size), selected_idx, :] last_hidden_states = torch.cat([last_hidden_states, next_hidden.unsqueeze(1)], dim=1) next_decoder_hidden_states = () for layer in full_hidden_states: layer = torch.stack(torch.split(layer, top_k))[range(batch_size), selected_idx, :] next_decoder_hidden_states += (layer,) # select the past_key_value new_key_values = () for layer in next_past_key_values: items = () # item is either the key or the value matrix for item in layer: item = torch.stack(torch.split(item, top_k, dim=0)) # [B, K, num_head, seq_len, esz] item = item[range(batch_size), selected_idx, ...] 
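The re-ranking step above (`_ranking_fast`) balances model confidence against a degeneration penalty: each of the top-k candidates is scored as `(1 - alpha) * p(candidate) - alpha * max_cosine_sim(candidate hidden state, previous hidden states)`, and the argmax per batch row is kept. A toy restatement of that scoring (hypothetical sizes, not the library function itself):

```python
import torch

torch.manual_seed(0)
batch_size, top_k, seq_len, hidden = 1, 4, 6, 8
alpha = 0.6

context_hidden = torch.randn(batch_size * top_k, seq_len, hidden)  # previous states, repeated per candidate
next_hidden = torch.randn(batch_size * top_k, 1, hidden)           # one state per candidate token
top_k_probs = torch.softmax(torch.randn(batch_size, top_k), dim=-1)

# Max cosine similarity of each candidate's hidden state to every previous position.
norm_ctx = context_hidden / context_hidden.norm(dim=2, keepdim=True)
norm_next = next_hidden / next_hidden.norm(dim=2, keepdim=True)
cosine = torch.bmm(norm_ctx, norm_next.transpose(1, 2)).squeeze(-1)  # (batch*k, seq_len)
degeneration_penalty = cosine.max(dim=-1).values                     # (batch*k,)

score = (1 - alpha) * top_k_probs.view(-1) - alpha * degeneration_penalty
selected_idx = score.view(batch_size, top_k).argmax(dim=-1)
print(selected_idx)  # index of the chosen candidate within each batch row's top-k
```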
# [B, num_head, seq_len, esz] items += (item,) new_key_values += (items,) next_past_key_values = new_key_values logit_for_next_step = torch.stack(torch.split(logits, top_k))[range(batch_size), selected_idx, :] # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration if self.config.is_encoder_decoder: next_step_cross_attentions = () next_step_decoder_attentions = () if output_attentions: for layer in outputs.cross_attentions: layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...] next_step_cross_attentions += (layer,) for layer in outputs.decoder_attentions: layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...] next_step_decoder_attentions += (layer,) outputs = Seq2SeqLMOutput( past_key_values=next_past_key_values, decoder_hidden_states=next_decoder_hidden_states, decoder_attentions=next_step_decoder_attentions or None, cross_attentions=next_step_cross_attentions or None, ) else: next_step_attentions = () if output_attentions: for layer in outputs.attentions: layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...] next_step_attentions += (layer,) outputs = CausalLMOutputWithPast( past_key_values=next_past_key_values, hidden_states=next_decoder_hidden_states, attentions=next_step_attentions or None, ) # contrastive_search main logic end if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) if streamer is not None: streamer.put(next_tokens.cpu()) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in one sentence, set sentence to finished if eos_token_id_tensor is not None: unfinished_sequences = unfinished_sequences.mul( next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0) ) # stop when each sentence is finished if unfinished_sequences.max() == 0: this_peer_finished = True # stop if we exceed the maximum length if stopping_criteria(input_ids, scores): this_peer_finished = True if this_peer_finished and not synced_gpus: break if streamer is not None: streamer.end() if return_dict_in_generate: if self.config.is_encoder_decoder: return ContrastiveSearchEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return ContrastiveSearchDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def greedy_search( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, output_attentions: Optional[bool] = None, 
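The `unfinished_sequences` bookkeeping used in the loop above (and again in the greedy and sample loops below) is a small mask trick: rows that already emitted an EOS keep producing `pad_token_id`, and a row is switched off once its new token matches any EOS id. A toy sketch with made-up ids:

```python
import torch

pad_token_id = 0
eos_token_id_tensor = torch.tensor([50256])     # one or more end-of-sequence ids
unfinished_sequences = torch.tensor([1, 0, 1])  # row 1 already finished earlier

next_tokens = torch.tensor([50256, 1234, 17])
# Finished rows are forced to emit padding instead of whatever was predicted.
next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
print(next_tokens)  # tensor([50256, 0, 17])

# A row is marked finished once its new token equals any of the EOS ids.
unfinished_sequences = unfinished_sequences.mul(
    next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
)
print(unfinished_sequences)  # tensor([0, 0, 1])
```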
output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: bool = False, streamer: Optional["BaseStreamer"] = None, **model_kwargs, ) -> Union[GreedySearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. <Tip warning={true}> In most cases, you do not need to call [`~generation.GenerationMixin.greedy_search`] directly. Use generate() instead. For an overview of generation strategies and code examples, check the [following guide](../generation_strategies). </Tip> Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) streamer (`BaseStreamer`, *optional*): Streamer object that will be used to stream the generated sequences. Generated tokens are passed through `streamer.put(token_ids)` and the streamer is responsible for any further processing. model_kwargs: Additional model specific keyword arguments will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation.GreedySearchDecoderOnlyOutput`], [`~generation.GreedySearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.GreedySearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... LogitsProcessorList, ... 
MinLengthLogitsProcessor, ... StoppingCriteriaList, ... MaxLengthCriteria, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id >>> input_prompt = "It might be possible to" >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id), ... ] ... ) >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) >>> outputs = model.greedy_search( ... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ["It might be possible to get a better understanding of the nature of the problem, but it's not"] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = torch.ones(input_ids.shape[0], dtype=torch.long, device=input_ids.device) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. 
# The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # pre-process distribution next_tokens_scores = logits_processor(input_ids, next_token_logits) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_tokens_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # argmax next_tokens = torch.argmax(next_tokens_scores, dim=-1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) if streamer is not None: streamer.put(next_tokens.cpu()) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in one sentence, set sentence to finished if eos_token_id_tensor is not None: unfinished_sequences = unfinished_sequences.mul( next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0) ) # stop when each sentence is finished if unfinished_sequences.max() == 0: this_peer_finished = True # stop if we exceed the maximum length if stopping_criteria(input_ids, scores): this_peer_finished = True if this_peer_finished and not synced_gpus: break if streamer is not None: streamer.end() if return_dict_in_generate: if self.config.is_encoder_decoder: return GreedySearchEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return GreedySearchDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def sample( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, output_attentions: Optional[bool] = None, 
output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: bool = False, streamer: Optional["BaseStreamer"] = None, **model_kwargs, ) -> Union[SampleOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **multinomial sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. <Tip warning={true}> In most cases, you do not need to call [`~generation.GenerationMixin.sample`] directly. Use generate() instead. For an overview of generation strategies and code examples, check the [following guide](../generation_strategies). </Tip> Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) streamer (`BaseStreamer`, *optional*): Streamer object that will be used to stream the generated sequences. Generated tokens are passed through `streamer.put(token_ids)` and the streamer is responsible for any further processing. model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. 
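To see what a `logits_warper` actually does to the distribution before sampling, here is a toy sketch applying temperature and top-k filtering to a random logits row (made-up values, no model involved):

```python
import torch
from transformers import LogitsProcessorList, TemperatureLogitsWarper, TopKLogitsWarper

torch.manual_seed(0)
input_ids = torch.tensor([[1, 2, 3]])  # dummy prefix; only its batch size matters here
scores = torch.randn(1, 50)            # pretend vocabulary of 50 tokens

warpers = LogitsProcessorList([TemperatureLogitsWarper(0.7), TopKLogitsWarper(5)])
warped = warpers(input_ids, scores)

# Everything outside the top 5 tokens is pushed to -inf, so sampling can only pick those 5.
print(torch.isfinite(warped).sum().item())  # 5
probs = torch.softmax(warped, dim=-1)
print((probs > 0).sum().item())             # 5
```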
Return: [`~generation.SampleDecoderOnlyOutput`], [`~generation.SampleEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation.SampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.SampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... TopKLogitsWarper, ... TemperatureLogitsWarper, ... StoppingCriteriaList, ... MaxLengthCriteria, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a EOS token >>> model.config.pad_token_id = model.config.eos_token_id >>> model.generation_config.pad_token_id = model.config.eos_token_id >>> input_prompt = "Today is a beautiful day, and" >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id), ... ] ... ) >>> # instantiate logits processors >>> logits_warper = LogitsProcessorList( ... [ ... TopKLogitsWarper(50), ... TemperatureLogitsWarper(0.7), ... ] ... ) >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> outputs = model.sample( ... input_ids, ... logits_processor=logits_processor, ... logits_warper=logits_warper, ... stopping_criteria=stopping_criteria, ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Today is a beautiful day, and we must do everything possible to make it a day of celebration.'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else 
None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = torch.ones(input_ids.shape[0], dtype=torch.long, device=input_ids.device) this_peer_finished = False # used by synced_gpus only # auto-regressive generation while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # pre-process distribution next_token_scores = logits_processor(input_ids, next_token_logits) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # sample probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) # update generated ids, model inputs, and length for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) if streamer is not None: streamer.put(next_tokens.cpu()) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in one sentence, set sentence to finished if eos_token_id_tensor is not None: unfinished_sequences = unfinished_sequences.mul( next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0) ) # stop when each sentence is finished if unfinished_sequences.max() == 0: this_peer_finished = True # stop if we exceed the maximum length if 
stopping_criteria(input_ids, scores): this_peer_finished = True if this_peer_finished and not synced_gpus: break if streamer is not None: streamer.end() if return_dict_in_generate: if self.config.is_encoder_decoder: return SampleEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return SampleDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: bool = False, **model_kwargs, ) -> Union[BeamSearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. <Tip warning={true}> In most cases, you do not need to call [`~generation.GenerationMixin.beam_search`] directly. Use generate() instead. For an overview of generation strategies and code examples, check the [following guide](../generation_strategies). </Tip> Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): An derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. 
return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`generation.BeamSearchDecoderOnlyOutput`], [`~generation.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... num_beams=num_beams, ... device=model.device, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ] ... 
) >>> outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) if len(stopping_criteria) == 0: warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning) pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None beam_indices = ( tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None ) decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens # of the first beam are considered to avoid sampling the exact same tokens across all beams. beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. 
# The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores_processed = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores_processed,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor") next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=beam_indices, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past_key_values"] is not None: model_kwargs["past_key_values"] = self._reorder_cache(model_kwargs["past_key_values"], beam_idx) if return_dict_in_generate and output_scores: beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=beam_indices, ) if 
return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def beam_sample( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: bool = False, **model_kwargs, ) -> Union[BeamSampleOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **beam search multinomial sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. <Tip warning={true}> In most cases, you do not need to call [`~generation.GenerationMixin.beam_sample`] directly. Use generate() instead. For an overview of generation strategies and code examples, check the [following guide](../generation_strategies). </Tip> Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. 
output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation.BeamSampleDecoderOnlyOutput`], [`~generation.BeamSampleEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation.BeamSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.BeamSampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... TopKLogitsWarper, ... TemperatureLogitsWarper, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... max_length=model.config.max_length, ... num_beams=num_beams, ... device=model.device, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)] ... ) >>> # instantiate logits processors >>> logits_warper = LogitsProcessorList( ... [ ... TopKLogitsWarper(50), ... TemperatureLogitsWarper(0.7), ... ] ... ) >>> outputs = model.beam_sample( ... input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs ... 
) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None beam_indices = ( tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None ) decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. 
For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores_processed = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores) # Note: logits warpers are intentionally applied after adding running beam scores. On some logits warpers # (like top_p) this is indiferent, but on others (like temperature) it is not. For reference, see # https://github.com/huggingface/transformers/pull/5420#discussion_r449779867 next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (logits_warper(input_ids, next_token_scores_processed),) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) next_token_scores = torch.gather(next_token_scores, -1, next_tokens) next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1) next_tokens = torch.gather(next_tokens, -1, _indices) next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor") next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=beam_indices, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past_key_values"] is not None: model_kwargs["past_key_values"] = self._reorder_cache(model_kwargs["past_key_values"], beam_idx) if return_dict_in_generate and output_scores: beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=beam_indices, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSampleEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], 
encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSampleDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def group_beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: bool = False, **model_kwargs, ): r""" Generates sequences of token ids for models with a language modeling head using **diverse beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. <Tip warning={true}> In most cases, you do not need to call [`~generation.GenerationMixin.group_beam_search`] directly. Use generate() instead. For an overview of generation strategies and code examples, check the [following guide](../generation_strategies). </Tip> Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): An derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs that will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation.BeamSearchDecoderOnlyOutput`], [`~generation.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation.BeamSearchDecoderOnlyOutput`] if [`~generation.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... HammingDiversityLogitsProcessor, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run diverse beam search using 6 beams >>> num_beams = 6 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... max_length=model.config.max_length, ... num_beams=num_beams, ... device=model.device, ... num_beam_groups=3, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3), ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> outputs = model.group_beam_search( ... input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs ... 
) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) num_beams = beam_scorer.num_beams num_beam_groups = beam_scorer.num_beam_groups num_sub_beams = num_beams // num_beam_groups batch_size = len(beam_scorer._beam_hyps) // num_beam_groups device = input_ids.device batch_beam_size, cur_len = input_ids.shape if return_dict_in_generate and output_scores: beam_indices = [tuple(() for _ in range(num_sub_beams * batch_size)) for _ in range(num_beam_groups)] else: beam_indices = None if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in # the same group don't produce same tokens everytime. beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device) beam_scores[:, ::num_sub_beams] = 0 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? 
the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # predicted tokens in cur_len step current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device) # indices which will form the beams in the next time step reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device) # do one decoder step on all beams of all sentences in batch model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need if output_scores: processed_score = torch.zeros_like(outputs.logits[:, -1, :]) for beam_group_idx in range(num_beam_groups): group_start_idx = beam_group_idx * num_sub_beams group_end_idx = min(group_start_idx + num_sub_beams, num_beams) group_size = group_end_idx - group_start_idx # indices of beams of current group among all sentences in batch batch_group_indices = [] for batch_idx in range(batch_size): batch_group_indices.extend( [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)] ) group_input_ids = input_ids[batch_group_indices] # select outputs of beams of current group only next_token_logits = outputs.logits[batch_group_indices, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * group_size, vocab_size) vocab_size = next_token_scores.shape[-1] next_token_scores_processed = logits_processor( group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx ) next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1) next_token_scores = next_token_scores.expand_as(next_token_scores_processed) if output_scores: processed_score[batch_group_indices] = next_token_scores_processed # reshape for beam search next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True ) next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor") next_tokens = next_tokens % vocab_size # stateless process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None beam_outputs = beam_scorer.process( group_input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=process_beam_indices, group_index=beam_group_idx, ) beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] if return_dict_in_generate and output_scores: beam_indices[beam_group_idx] = tuple( beam_indices[beam_group_idx][beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices[0])) ) input_ids[batch_group_indices] = group_input_ids[beam_idx] group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) current_tokens[batch_group_indices] = 
group_input_ids[:, -1] # (beam_idx // group_size) -> batch_idx # (beam_idx % group_size) -> offset of idx inside the group reordering_indices[batch_group_indices] = ( num_beams * torch.div(beam_idx, group_size, rounding_mode="floor") + group_start_idx + (beam_idx % group_size) ) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (processed_score,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past_key_values"] is not None: model_kwargs["past_key_values"] = self._reorder_cache( model_kwargs["past_key_values"], reordering_indices ) # increase cur_len cur_len = cur_len + 1 if beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=final_beam_indices, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def constrained_beam_search( self, input_ids: torch.LongTensor, constrained_beam_scorer: ConstrainedBeamSearchScorer, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[BeamSearchOutput, torch.LongTensor]: r""" Generates sequences of token ids for models with a language modeling head using **constrained beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. <Tip warning={true}> In most cases, you do not need to call [`~generation.GenerationMixin.constrained_beam_search`] directly. Use generate() instead. 
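        A minimal sketch of the equivalent high-level call (the model checkpoint and constraint phrase are only
        examples):

        ```python
        from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, PhrasalConstraint

        tokenizer = AutoTokenizer.from_pretrained("t5-base")
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")

        input_ids = tokenizer("translate English to German: How old are you?", return_tensors="pt").input_ids
        constraint = PhrasalConstraint(tokenizer("Sie", add_special_tokens=False).input_ids)

        # passing `constraints` makes `generate` run constrained beam search internally
        outputs = model.generate(input_ids, num_beams=3, constraints=[constraint], max_new_tokens=20)
        print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
        ```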
For an overview of generation strategies and code examples, check the [following guide](../generation_strategies). </Tip> Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. constrained_beam_scorer (`ConstrainedBeamSearchScorer`): A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and sorted during generation, while satisfying a list of positive constraints. For more information, the documentation of [`ConstrainedBeamSearchScorer`] should be read. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated tokens. The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`generation.BeamSearchDecoderOnlyOutput`], [`~generation.BeamSearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... ConstrainedBeamSearchScorer, ... PhrasalConstraint, ... 
) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()( ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ... ) ... } >>> constraint_str = "Sie" >>> constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # slice to remove eos token >>> constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] >>> # instantiate beam scorer >>> beam_scorer = ConstrainedBeamSearchScorer( ... batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ] ... ) >>> outputs = model.constrained_beam_search( ... input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt sind Sie?'] ```""" # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: warnings.warn( "`max_length` is deprecated in this function, use" " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) if len(stopping_criteria) == 0: warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning) pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( 
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(constrained_beam_scorer._beam_hyps) num_beams = constrained_beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape if num_beams * batch_size != batch_beam_size: raise ValueError( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens # of the first beam are considered to avoid sampling the exact same tokens across all beams. beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if synced_gpus and this_peer_finished: cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id` # cannot be generated both before and after the `nn.functional.log_softmax` operation. 
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) next_token_scores = nn.functional.log_softmax( next_token_logits, dim=-1 ) # (batch_size * num_beams, vocab_size) next_token_scores_processed = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores) scores_for_all_vocab = next_token_scores.clone() # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if self.config.is_encoder_decoder: cross_attentions += (outputs.cross_attentions,) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) next_indices = (next_tokens / vocab_size).long() next_tokens = next_tokens % vocab_size # stateless beam_outputs = constrained_beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, scores_for_all_vocab, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past_key_values"] is not None: model_kwargs["past_key_values"] = self._reorder_cache(model_kwargs["past_key_values"], beam_idx) # increase cur_len cur_len = cur_len + 1 if constrained_beam_scorer.is_done or stopping_criteria(input_ids, scores): if not synced_gpus: break else: this_peer_finished = True sequence_outputs = constrained_beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def assisted_decoding( self, input_ids: torch.LongTensor, assistant_model: "PreTrainedModel", do_sample: bool = False, logits_processor: Optional[LogitsProcessorList] = None, logits_warper: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, pad_token_id: Optional[int] = None, eos_token_id: 
Optional[Union[int, List[int]]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, synced_gpus: bool = False, streamer: Optional["BaseStreamer"] = None, **model_kwargs, ): r""" Generates sequences of token ids for models with a language modeling head using **greedy decoding** or **sample** (depending on `do_sample`), assisted by a smaller model. Can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. <Tip warning={true}> In most cases, you do not need to call [`~generation.GenerationMixin.assisted_decoding`] directly. Use generate() instead. For an overview of generation strategies and code examples, check the [following guide](../generation_strategies). </Tip> Parameters: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. assistant_model (`PreTrainedModel`, *optional*): An assistant model that can be used to accelerate generation. The assistant model must have the exact same tokenizer. The acceleration is achieved when forecasting candidate tokens with the assistent model is much faster than running generation with the model you're calling generate from. As such, the assistant model should be much smaller. do_sample (`bool`, *optional*, defaults to `False`): Whether or not to use sampling ; use greedy decoding otherwise. logits_processor (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (`LogitsProcessorList`, *optional*): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. stopping_criteria (`StoppingCriteriaList`, *optional*): An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] used to tell if the generation loop should stop. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) streamer (`BaseStreamer`, *optional*): Streamer object that will be used to stream the generated sequences. Generated tokens are passed through `streamer.put(token_ids)` and the streamer is responsible for any further processing. 
model_kwargs: Additional model specific keyword arguments will be forwarded to the `forward` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation.GreedySearchDecoderOnlyOutput`], [`~generation.GreedySearchEncoderDecoderOutput`] or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a [`~generation.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.GreedySearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... StoppingCriteriaList, ... MaxLengthCriteria, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> assistant_model = AutoModelForCausalLM.from_pretrained("distilgpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id >>> input_prompt = "It might be possible to" >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList( ... [ ... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id), ... ] ... ) >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) >>> outputs = model.assisted_decoding( ... input_ids, ... assistant_model=assistant_model, ... logits_processor=logits_processor, ... stopping_criteria=stopping_criteria, ... ) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ["It might be possible to get a better understanding of the nature of the problem, but it's not"] ```""" # Assistant: initialize assistant-related variables if not hasattr(assistant_model, "max_assistant_tokens"): assistant_model.max_assistant_tokens = 5 # this value, which will be updated, persists across calls # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if eos_token_id is not None and pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () 
if (return_dict_in_generate and output_attentions) else None cross_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) # other auxiliary variables max_len = stopping_criteria[0].max_length assistant_kv_indexing = ( 1 if "bloom" in assistant_model.__class__.__name__.lower() or ( assistant_model.config.architectures is not None and "bloom" in assistant_model.config.architectures[0].lower() ) else 0 ) this_peer_finished = False # used by synced_gpus only while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device) # send 0.0 if we finished, 1.0 otherwise dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) # did all peers finish? the reduced sum will be 0.0 then if this_peer_finished_flag.item() == 0.0: break # Assistant: main logic start cur_len = input_ids.shape[-1] # 1. Forecast next N tokens using the assistant model. This `for` block can be replaced with a # `.generate()` call if we decide to add `past_key_values` as a possible output of generate, as we # need access to the assistant cache to secure strong speedups. candidate_input_ids = input_ids for _ in range(int(assistant_model.max_assistant_tokens)): # 1.1. use the assistant model to obtain the next candidate logits if "assistant_past_key_values" in model_kwargs: prev_seq_len = model_kwargs["assistant_past_key_values"][0][assistant_kv_indexing].shape[-2] # `new_token_len` can be 1 or 2 (next token in assistant + last token picked by the larger model) new_token_len = candidate_input_ids.shape[1] - prev_seq_len assist_inputs = candidate_input_ids[:, -new_token_len:] assist_attn = torch.ones_like(candidate_input_ids) # TODO (joao): make it compatible with models that use unconventional fwd pass logic, like blip2 if assistant_model.config.is_encoder_decoder: assistant_model_outputs = assistant_model( decoder_input_ids=assist_inputs, decoder_attention_mask=assist_attn, past_key_values=model_kwargs["assistant_past_key_values"], encoder_outputs=model_kwargs["assistant_encoder_outputs"], ) else: assistant_model_outputs = assistant_model( assist_inputs, attention_mask=assist_attn, past_key_values=model_kwargs["assistant_past_key_values"], ) else: if assistant_model.config.is_encoder_decoder: assistant_model_outputs = assistant_model( decoder_input_ids=candidate_input_ids, encoder_outputs=model_kwargs["assistant_encoder_outputs"], ) else: assistant_model_outputs = assistant_model(candidate_input_ids) # 1.2. 
greedily select the next candidate token model_kwargs["assistant_past_key_values"] = assistant_model_outputs.past_key_values if len(logits_processor) > 0: assistant_model_outputs.logits[:, -1, :] = logits_processor( candidate_input_ids, assistant_model_outputs.logits[:, -1, :] ) new_token = assistant_model_outputs.logits[:, -1, :].argmax(dim=-1) candidate_input_ids = torch.cat((candidate_input_ids, new_token[:, None]), dim=-1) # 1.3. stop assistant generation on EOS if eos_token_id_tensor is not None: last_assistant_token_is_eos = new_token.tile(eos_token_id_tensor.shape[0], 1) last_assistant_token_is_eos = ( ~last_assistant_token_is_eos.ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0).bool() ) if last_assistant_token_is_eos: break else: last_assistant_token_is_eos = False candidate_length = candidate_input_ids.shape[1] - input_ids.shape[1] # 2. Use the original model to obtain the next token logits given the candidate sequence. We obtain # `candidate_length + 1` relevant logits from this process: in the event that all candidates are correct, # we use this forward pass to also pick the subsequent logits in the original model. # 2.1. Run a forward pass on the candidate sequence if "past_key_values" in model_kwargs: model_attn = torch.ones_like(candidate_input_ids) model_input_ids = candidate_input_ids[:, -candidate_length - 1 :] if self.config.is_encoder_decoder: outputs = self( decoder_input_ids=model_input_ids, decoder_attention_mask=model_attn, past_key_values=model_kwargs["past_key_values"], encoder_outputs=model_kwargs["encoder_outputs"], output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=True, ) else: outputs = self( model_input_ids, attention_mask=model_attn, past_key_values=model_kwargs["past_key_values"], output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=True, ) else: if self.config.is_encoder_decoder: outputs = self( decoder_input_ids=candidate_input_ids, encoder_outputs=model_kwargs["encoder_outputs"], output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=True, ) else: outputs = self( candidate_input_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=True, ) # 2.2. Process the new logits new_logits = outputs.logits[:, -candidate_length - 1 :] # excludes the input prompt if present if len(logits_processor) > 0: for i in range(candidate_length): new_logits[:, i, :] = logits_processor(candidate_input_ids[:, : cur_len + i], new_logits[:, i, :]) if len(logits_warper) > 0: for i in range(candidate_length): new_logits[:, i, :] = logits_warper(candidate_input_ids[:, : cur_len + i], new_logits[:, i, :]) # 3. Obtain the next tokens from the original model logits. if do_sample: probs = new_logits[:, -candidate_length - 1 :, :].softmax(dim=-1) selected_tokens = torch.multinomial(probs[0, :, :], num_samples=1).squeeze(1)[None, :] else: selected_tokens = new_logits[:, -candidate_length - 1 :, :].argmax(dim=-1) # 4. Compare the argmax from the original model logits with the assistant forecasted tokens. We can keep # the assistant forecasted tokens until the first mismatch, or until the max length is reached. candidate_new_tokens = candidate_input_ids[:, -candidate_length:] n_matches = ((~(candidate_new_tokens == selected_tokens[:, :-1])).cumsum(dim=-1) < 1).sum() # 5. Update variables according to the number of matching assistant tokens. 
Remember: the token generated # by the model after the last candidate match is also valid, as it is generated from a correct sequence. # Because of this last token, assisted generation search reduces to a normal greedy search/sample if there # is no match. # 5.1. Ensure we don't generate beyond max_len or an EOS token if last_assistant_token_is_eos and n_matches == candidate_length: n_matches -= 1 n_matches = min(n_matches, max_len - cur_len - 1) # 5.2. Get the valid continuation, after the matching tokens valid_tokens = selected_tokens[:, : n_matches + 1] input_ids = torch.cat((input_ids, valid_tokens), dim=-1) if streamer is not None: streamer.put(valid_tokens.cpu()) new_cur_len = input_ids.shape[-1] # 5.3. Discard past key values relative to unused assistant tokens new_cache_size = new_cur_len - 1 outputs.past_key_values = _crop_past_key_values(self, outputs.past_key_values, new_cache_size) model_kwargs["assistant_past_key_values"] = _crop_past_key_values( assistant_model, model_kwargs["assistant_past_key_values"], new_cache_size - 1 ) # the assistant does not have the token after the last match, hence the -1 # 6. Adjust the max number of assistant tokens to use in the next iteration. This is a simple heuristic, # probably can be improved -- we want to balance the benefits of getting assistant tokens correct with the # cost of forecasting incorrect assistant tokens. if n_matches == int(assistant_model.max_assistant_tokens): assistant_model.max_assistant_tokens += 2.0 else: assistant_model.max_assistant_tokens = max(1.0, assistant_model.max_assistant_tokens - 1.0) # Assistant: main logic end if synced_gpus and this_peer_finished: continue # don't waste resources running the code we don't need # Store scores, attentions and hidden_states when required # Assistant: modified to append one tuple element per token, as in the other generation methods. 
if return_dict_in_generate: if output_scores: scores += tuple(new_logits[:, i, :] for i in range(n_matches + 1)) if "past_key_values" not in model_kwargs: added_len = new_cur_len else: added_len = n_matches + 1 if output_attentions: if self.config.is_encoder_decoder: cross_attentions = _split_model_outputs( cross_attentions, outputs.cross_attentions, cur_len, added_len ) decoder_attentions = _split_model_outputs( decoder_attentions, outputs.decoder_attentions, cur_len, added_len, is_decoder_attention=True, ) else: decoder_attentions = _split_model_outputs( decoder_attentions, outputs.attentions, cur_len, added_len, is_decoder_attention=True, ) if output_hidden_states: if self.config.is_encoder_decoder: decoder_hidden_states = _split_model_outputs( decoder_hidden_states, outputs.decoder_hidden_states, cur_len, added_len ) else: decoder_hidden_states = _split_model_outputs( decoder_hidden_states, outputs.hidden_states, cur_len, added_len ) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if eos_token was found in one sentence, set sentence to finished if eos_token_id_tensor is not None: unfinished_sequences = unfinished_sequences.mul( input_ids[:, -1] .tile(eos_token_id_tensor.shape[0], 1) .ne(eos_token_id_tensor.unsqueeze(1)) .prod(dim=0) ) # stop when each sentence is finished if unfinished_sequences.max() == 0: this_peer_finished = True # stop if we exceed the maximum length if stopping_criteria(input_ids, scores): this_peer_finished = True if this_peer_finished and not synced_gpus: break if streamer is not None: streamer.end() if return_dict_in_generate: if self.config.is_encoder_decoder: return GreedySearchEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return GreedySearchDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def _crop_past_key_values(model, past_key_values, maximum_length): """Crops the past key values up to a certain maximum length.""" new_past = [] if model.config.is_encoder_decoder: for idx in range(len(past_key_values)): new_past.append( ( past_key_values[idx][0][:, :, :maximum_length, :], past_key_values[idx][1][:, :, :maximum_length, :], past_key_values[idx][2], past_key_values[idx][3], ) ) past_key_values = tuple(new_past) # bloom is special elif "bloom" in model.__class__.__name__.lower() or ( model.config.architectures is not None and "bloom" in model.config.architectures[0].lower() ): for idx in range(len(past_key_values)): new_past.append( ( past_key_values[idx][0][:, :, :maximum_length], past_key_values[idx][1][:, :maximum_length, :], ) ) past_key_values = tuple(new_past) # gptbigcode is too elif "gptbigcode" in model.__class__.__name__.lower() or ( model.config.architectures is not None and "gptbigcode" in model.config.architectures[0].lower() ): if model.config.multi_query: for idx in range(len(past_key_values)): past_key_values[idx] = past_key_values[idx][:, :maximum_length, :] else: for idx in range(len(past_key_values)): past_key_values[idx] = past_key_values[idx][:, :, :maximum_length, :] else: for idx in range(len(past_key_values)): new_past.append( ( past_key_values[idx][0][:, :, :maximum_length, :], past_key_values[idx][1][:, :, :maximum_length, :], ) ) 
past_key_values = tuple(new_past) return past_key_values def _split_model_outputs(outputs, new_outputs, cur_len, added_len, is_decoder_attention=False): """ Given the (decoder/cross attentions)/(decoder hidden states) for multiple generated tokens, splits it into a tuple where each member corresponds to a single generated token. """ # Retrocompatibility: in our generation functions, the first iteration includes the attention/hidden states for the # prompt. if len(outputs) == 0: new_tuple = () for layer in new_outputs: last_dim_size = cur_len if is_decoder_attention else layer.shape[-1] new_tuple += (layer[..., :cur_len, :last_dim_size],) outputs += (new_tuple,) # The first iteration contains the prompt + 1 generated token, let's update the length variables accordingly cur_len += 1 added_len -= cur_len for i in range(added_len): new_tuple = () for layer in new_outputs: last_dim_size = cur_len + i if is_decoder_attention else layer.shape[-1] new_tuple += (layer[..., i : i + 1, :last_dim_size],) outputs += (new_tuple,) return outputs def top_k_top_p_filtering( logits: torch.FloatTensor, top_k: int = 0, top_p: float = 1.0, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1, ) -> torch.FloatTensor: """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering) top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimumber of tokens we keep per batch example in the output. From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ if top_k > 0: logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)( None, logits ) if 0 <= top_p <= 1.0: logits = TopPLogitsWarper(top_p=top_p, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)( None, logits ) return logits def _ranking_fast( context_hidden: torch.FloatTensor, next_hidden: torch.FloatTensor, next_top_k_probs: torch.FloatTensor, alpha: float, beam_width: int, ) -> torch.FloatTensor: """ Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each row in the batch. """ norm_context_hidden = context_hidden / context_hidden.norm(dim=2, keepdim=True) norm_next_hidden = next_hidden / next_hidden.norm(dim=2, keepdim=True) cosine_matrix = torch.matmul(norm_context_hidden, norm_next_hidden.transpose(1, 2)).squeeze(-1) # [B*K, S] degeneration_penalty, _ = torch.max(cosine_matrix, dim=-1) # [B*K] next_top_k_probs = next_top_k_probs.view(-1) # [B*K] contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty contrastive_score = torch.stack(torch.split(contrastive_score, beam_width)) # [B, K] _, selected_idx = contrastive_score.max(dim=-1) # [B] return selected_idx
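# --- Hedged usage sketch (illustration only, not part of the original source) ---
# A minimal example of the `top_k_top_p_filtering` helper defined above: it masks logits so that
# sampling is restricted to the `top_k` highest-probability tokens and/or the smallest nucleus
# whose cumulative probability exceeds `top_p`. The batch size and vocabulary size below are
# arbitrary assumptions made only for this sketch, and it assumes the helper is in scope
# (e.g. the snippet is run inside this module).
if __name__ == "__main__":
    import torch

    example_logits = torch.randn(2, 32000)  # (batch_size, vocab_size), arbitrary values
    filtered_logits = top_k_top_p_filtering(example_logits.clone(), top_k=50, top_p=0.95)
    probs = torch.nn.functional.softmax(filtered_logits, dim=-1)
    sampled = torch.multinomial(probs, num_samples=1)
    # tokens outside the top-k / nucleus set were set to -inf, so at most `top_k` tokens per row
    # keep non-zero probability after filtering
    assert probs.gt(0).sum(dim=-1).max().item() <= 50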
252409
53.130388
327
py
transformers
transformers-main/src/transformers/generation/beam_search.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod from collections import UserDict from typing import Dict, List, Optional, Tuple, Union import numpy as np import torch from ..utils import add_start_docstrings from .beam_constraints import Constraint, ConstraintListState PROCESS_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`): Current scores of the top `2 * num_beams` non-finished beam hypotheses. next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses. next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): Beam indices indicating to which beam hypothesis the `next_tokens` correspond. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. beam_indices (`torch.LongTensor]`, *optional*): Beam indices indicating to which beam hypothesis each token correspond. group_index (`int`, *optional*): The index of the group of beams. Used with [`~PreTrainedModel.group_beam_search`]. Return: `UserDict`: A dictionary composed of the fields as defined above: - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all non-finished beams. - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added to the non-finished beam_hypotheses. - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices indicating to which beam the next tokens shall be added. """ FINALIZE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) final_beam_scores (`torch.FloatTensor` of shape `(batch_size * num_beams)`): The final scores of all non-finished beams. final_beam_tokens (`torch.FloatTensor` of shape `(batch_size * num_beams)`): The last tokens to be added to the non-finished beam_hypotheses. final_beam_indices (`torch.FloatTensor` of shape `(batch_size * num_beams)`): The beam indices indicating to which beam the `final_beam_tokens` shall be added. pad_token_id (`int`, *optional*): The id of the *padding* token. 
eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. Return: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. """ class BeamScorer(ABC): """ Abstract base class for all beam scorers that are used for [`~PreTrainedModel.beam_search`] and [`~PreTrainedModel.beam_sample`]. """ @abstractmethod @add_start_docstrings(PROCESS_INPUTS_DOCSTRING) def process( self, input_ids: torch.LongTensor, next_scores: torch.FloatTensor, next_tokens: torch.LongTensor, next_indices: torch.LongTensor, **kwargs, ) -> Tuple[torch.Tensor]: raise NotImplementedError("This is an abstract method.") @abstractmethod @add_start_docstrings(FINALIZE_INPUTS_DOCSTRING) def finalize( self, input_ids: torch.LongTensor, next_scores: torch.FloatTensor, next_tokens: torch.LongTensor, next_indices: torch.LongTensor, max_length: int, **kwargs, ) -> torch.LongTensor: raise NotImplementedError("This is an abstract method.") class BeamSearchScorer(BeamScorer): r""" [`BeamScorer`] implementing standard beam search decoding. Adapted in part from [Facebook's XLM beam search code](https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529). Reference for the diverse beam search algorithm and implementation [Ashwin Kalyan's DBS implementation](https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua) Args: batch_size (`int`): Batch Size of `input_ids` for which standard beam search decoding is run in parallel. num_beams (`int`): Number of beams for beam search. device (`torch.device`): Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be allocated. length_penalty (`float`, *optional*, defaults to 1.0): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences. do_early_stopping (`bool` or `str`, *optional*, defaults to `False`): Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values: `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an heuristic is applied and the generation stops when is it very unlikely to find better candidates; `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). num_beam_hyps_to_keep (`int`, *optional*, defaults to 1): The number of beam hypotheses that shall be returned upon calling [`~transformer.BeamSearchScorer.finalize`]. num_beam_groups (`int`): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. max_length (`int`, *optional*): The maximum length of the sequence to be generated. 
""" def __init__( self, batch_size: int, num_beams: int, device: torch.device, length_penalty: Optional[float] = 1.0, do_early_stopping: Optional[Union[bool, str]] = False, num_beam_hyps_to_keep: Optional[int] = 1, num_beam_groups: Optional[int] = 1, max_length: Optional[int] = None, ): self.num_beams = num_beams self.device = device self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep self.num_beam_groups = num_beam_groups self.group_size = self.num_beams // self.num_beam_groups self._is_init = False # self._beam_hyps[i*self.num_beam_groups+j] is the beam_hyps of the j-th group in the i-th mini-batch. # If group_beam_search is not used, the list consists of `batch_size` beam_hyps. self._beam_hyps = [ BeamHypotheses( num_beams=self.group_size, length_penalty=self.length_penalty, early_stopping=self.do_early_stopping, max_length=max_length, ) for _ in range(batch_size * self.num_beam_groups) ] # self._done[i*self.num_beam_groups+j] indicates whether the generation of the beam_hyps of the j-th group # in the i-th mini-batch is complete. self._done = torch.tensor( [False for _ in range(batch_size * self.num_beam_groups)], dtype=torch.bool, device=self.device ) if not isinstance(num_beams, int) or num_beams <= 1: raise ValueError( f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1," " one should make use of `greedy_search` instead." ) if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0): raise ValueError( "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be" f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}." ) @property def is_done(self) -> bool: return self._done.all() def process( self, input_ids: torch.LongTensor, next_scores: torch.FloatTensor, next_tokens: torch.LongTensor, next_indices: torch.LongTensor, pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, beam_indices: Optional[torch.LongTensor] = None, group_index: Optional[int] = 0, ) -> Dict[str, torch.Tensor]: cur_len = input_ids.shape[-1] + 1 # add up to the length which the next_scores is calculated on batch_size = len(self._beam_hyps) // self.num_beam_groups if not (batch_size == (input_ids.shape[0] // self.group_size)): if self.num_beam_groups > 1: raise ValueError( f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam " f"size of {self.group_size} is expected by the beam scorer." ) else: raise ValueError( f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of " f"{self.group_size} is expected by the beam scorer." 
) device = input_ids.device next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device) next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device) next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device) if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] for batch_idx in range(batch_size): batch_group_idx = batch_idx * self.num_beam_groups + group_index if self._done[batch_group_idx]: if self.num_beams < len(self._beam_hyps[batch_group_idx]): raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated") if eos_token_id is None or pad_token_id is None: raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined") # pad the batch next_beam_scores[batch_idx, :] = 0 next_beam_tokens[batch_idx, :] = pad_token_id next_beam_indices[batch_idx, :] = 0 continue # next tokens for this sentence beam_idx = 0 for beam_token_rank, (next_token, next_score, next_index) in enumerate( zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx]) ): batch_beam_idx = batch_idx * self.group_size + next_index # add to generated hypotheses if end of sentence if (eos_token_id is not None) and (next_token.item() in eos_token_id): # if beam_token does not belong to top num_beams tokens, it should not be added is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size if is_beam_token_worse_than_top_num_beams: continue if beam_indices is not None: beam_index = beam_indices[batch_beam_idx] beam_index = beam_index + (batch_beam_idx,) else: beam_index = None self._beam_hyps[batch_group_idx].add( input_ids[batch_beam_idx].clone(), next_score.item(), beam_indices=beam_index, ) else: # add next predicted token since it is not eos_token next_beam_scores[batch_idx, beam_idx] = next_score next_beam_tokens[batch_idx, beam_idx] = next_token next_beam_indices[batch_idx, beam_idx] = batch_beam_idx beam_idx += 1 # once the beam for next step is full, don't add more tokens to it. if beam_idx == self.group_size: break if beam_idx < self.group_size: raise ValueError( f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:" f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected." 
) # Check if we are done so that we can save a pad step if all(done) self._done[batch_group_idx] = self._done[batch_group_idx] or self._beam_hyps[batch_group_idx].is_done( next_scores[batch_idx].max().item(), cur_len ) return UserDict( { "next_beam_scores": next_beam_scores.view(-1), "next_beam_tokens": next_beam_tokens.view(-1), "next_beam_indices": next_beam_indices.view(-1), } ) def finalize( self, input_ids: torch.LongTensor, final_beam_scores: torch.FloatTensor, final_beam_tokens: torch.LongTensor, final_beam_indices: torch.LongTensor, max_length: int, pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, beam_indices: Optional[torch.LongTensor] = None, ) -> Tuple[torch.LongTensor]: batch_size = len(self._beam_hyps) // self.num_beam_groups if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] # finalize all open beam hypotheses and add to generated hypotheses for batch_group_idx, beam_hyp in enumerate(self._beam_hyps): if self._done[batch_group_idx]: continue # all open beam hypotheses are added to the beam hypothesis # beam hypothesis class automatically keeps the best beams for index_per_group in range(self.group_size): batch_beam_idx = batch_group_idx * self.group_size + index_per_group final_score = final_beam_scores[batch_beam_idx].item() final_tokens = input_ids[batch_beam_idx] beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None beam_hyp.add(final_tokens, final_score, beam_indices=beam_index) # select the best hypotheses sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep) best = [] best_indices = [] best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32) # retrieve best hypotheses for i in range(batch_size): beam_hyps_in_batch = self._beam_hyps[i * self.num_beam_groups : (i + 1) * self.num_beam_groups] candidate_beams = [beam for beam_hyp in beam_hyps_in_batch for beam in beam_hyp.beams] sorted_hyps = sorted(candidate_beams, key=lambda x: x[0]) for j in range(self.num_beam_hyps_to_keep): best_hyp_tuple = sorted_hyps.pop() best_score = best_hyp_tuple[0] best_hyp = best_hyp_tuple[1] best_index = best_hyp_tuple[2] sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp) # append hyp to lists best.append(best_hyp) # append indices to list best_indices.append(best_index) best_scores[i * self.num_beam_hyps_to_keep + j] = best_score # prepare for adding eos sent_lengths_max = sent_lengths.max().item() + 1 sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) if len(best_indices) > 0 and best_indices[0] is not None: indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) else: indices = None # shorter batches are padded if needed if sent_lengths.min().item() != sent_lengths.max().item(): if pad_token_id is None: raise ValueError("`pad_token_id` has to be defined") decoded.fill_(pad_token_id) if indices is not None: indices.fill_(-1) # fill with hypotheses and eos_token_id if the latter fits in for i, (hypo, best_idx) in enumerate(zip(best, best_indices)): decoded[i, : sent_lengths[i]] = hypo if indices is not None: indices[i, : len(best_idx)] = torch.tensor(best_idx) if sent_lengths[i] < sent_max_len: # inserting only the first eos_token_id decoded[i, sent_lengths[i]] = eos_token_id[0] return UserDict( { "sequences": decoded, "sequence_scores": best_scores, 
"beam_indices": indices, } ) class ConstrainedBeamSearchScorer(BeamScorer): r""" [`BeamScorer`] implementing constrained beam search decoding. Args: batch_size (`int`): Batch Size of `input_ids` for which standard beam search decoding is run in parallel. num_beams (`int`): Number of beams for beam search. constraints (`List[Constraint]`): A list of positive constraints represented as `Constraint` objects that must be fulfilled in the generation output. For more information, the documentation of [`Constraint`] should be read. device (`torch.device`): Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be allocated. length_penalty (`float`, *optional*, defaults to 1.0): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences. do_early_stopping (`bool` or `str`, *optional*, defaults to `False`): Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values: `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an heuristic is applied and the generation stops when is it very unlikely to find better candidates; `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). num_beam_hyps_to_keep (`int`, *optional*, defaults to 1): The number of beam hypotheses that shall be returned upon calling [`~transformer.BeamSearchScorer.finalize`]. num_beam_groups (`int`): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. max_length (`int`, *optional*): The maximum length of the sequence to be generated. """ def __init__( self, batch_size: int, num_beams: int, constraints: List[Constraint], device: torch.device, length_penalty: Optional[float] = 1.0, do_early_stopping: Optional[Union[bool, str]] = False, num_beam_hyps_to_keep: Optional[int] = 1, num_beam_groups: Optional[int] = 1, max_length: Optional[int] = None, ): self.num_beams = num_beams self.device = device self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep self.num_beam_groups = num_beam_groups self.group_size = self.num_beams // self.num_beam_groups self.constraints = constraints self._is_init = False self._beam_hyps = [ BeamHypotheses( num_beams=self.num_beams, length_penalty=self.length_penalty, early_stopping=self.do_early_stopping, max_length=max_length, ) for _ in range(batch_size) ] self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device) if not isinstance(num_beams, int) or num_beams <= 1: raise ValueError( f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1," " one should make use of `greedy_search` instead." ) if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0): raise ValueError( "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be" f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}." 
) @property def is_done(self) -> bool: return self._done.all() def make_constraint_states(self, n): return [ConstraintListState([constraint.copy() for constraint in self.constraints]) for _ in range(n)] def check_completes_constraints(self, sequence): new_state = self.make_constraint_states(1)[0] new_state.reset(sequence) return new_state.completed def process( self, input_ids: torch.LongTensor, next_scores: torch.FloatTensor, next_tokens: torch.LongTensor, next_indices: torch.LongTensor, scores_for_all_vocab: torch.FloatTensor, pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, ) -> Tuple[torch.Tensor]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`): Current scores of the top `2 * num_beams` non-finished beam hypotheses. next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses. next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): Beam indices indicating to which beam hypothesis the `next_tokens` correspond. scores_for_all_vocab (`torch.FloatTensor` of shape `(batch_size * num_beams, sequence_length)`): The scores of all tokens in the vocabulary for each of the beam hypotheses. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. Return: `UserDict`: A dictionary composed of the fields as defined above: - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all non-finished beams. - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added to the non-finished beam_hypotheses. - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices indicating to which beam the next tokens shall be added. """ cur_len = input_ids.shape[-1] + 1 # add up to the length which the next_scores is calculated on batch_size = len(self._beam_hyps) if not (batch_size == (input_ids.shape[0] // self.group_size)): if self.num_beam_groups > 1: raise ValueError( f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam " f"size of {self.group_size} is expected by the beam scorer." ) else: raise ValueError( f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of " f"{self.group_size} is expected by the beam scorer." 
) device = input_ids.device next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device) next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device) next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device) if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] for batch_idx, beam_hyp in enumerate(self._beam_hyps): if self._done[batch_idx]: if self.num_beams < len(beam_hyp): raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated") if eos_token_id is None or pad_token_id is None: raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined") # pad the batch next_beam_scores[batch_idx, :] = 0 next_beam_tokens[batch_idx, :] = pad_token_id next_beam_indices[batch_idx, :] = 0 continue # next tokens for this sentence. beam_idx = 0 for beam_token_rank, (next_token, next_score, next_index) in enumerate( zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx]) ): batch_beam_idx = batch_idx * self.group_size + next_index # add to generated hypotheses if end of sentence if (eos_token_id is not None) and (next_token.item() in eos_token_id): # if beam_token does not belong to top num_beams tokens, it should not be added is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size if is_beam_token_worse_than_top_num_beams: continue completes_constraint = self.check_completes_constraints(input_ids[batch_beam_idx].cpu().tolist()) if completes_constraint: beam_hyp.add( input_ids[batch_beam_idx].clone(), next_score.item(), ) else: # add next predicted token since it is not eos_token next_beam_scores[batch_idx, beam_idx] = next_score next_beam_tokens[batch_idx, beam_idx] = next_token next_beam_indices[batch_idx, beam_idx] = batch_beam_idx beam_idx += 1 # once the beam for next step is full, don't add more tokens to it. if beam_idx == self.group_size: break new_scores, new_tokens, new_indices = self.step_sentence_constraint( batch_idx, input_ids, scores_for_all_vocab, next_beam_scores[batch_idx], next_beam_tokens[batch_idx], next_beam_indices[batch_idx], ) next_beam_scores[batch_idx] = new_scores next_beam_tokens[batch_idx] = new_tokens next_beam_indices[batch_idx] = new_indices if beam_idx < self.group_size: raise ValueError( f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:" f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected." ) # Check if we are done so that we can save a pad step if all(done) self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done( next_scores[batch_idx].max().item(), cur_len ) return UserDict( { "next_beam_scores": next_beam_scores.view(-1), "next_beam_tokens": next_beam_tokens.view(-1), "next_beam_indices": next_beam_indices.view(-1), } ) def step_sentence_constraint( self, batch_idx: int, input_ids: torch.LongTensor, vocab_scores: torch.FloatTensor, sent_beam_scores: torch.FloatTensor, sent_beam_tokens: torch.LongTensor, sent_beam_indices: torch.LongTensor, push_progress: bool = False, ): # sent_beam_tokens are the next {num_beams} number of tokens that are under consideration for this beam # (candidate next tokens) # 1. Adding "advance_tokens" # using ConstraintStateList.advance(), we propose new tokens to be added into this "candidate list" that will # advance us in fulfilling the constraints. # 2. 
Selecting best candidates such that we end up with highest probable candidates # that fulfill our constraints. orig_len = sent_beam_indices.size(0) device = sent_beam_indices.device # initialize states topk_contraint_states = self.make_constraint_states(orig_len) advance_constraint_states = self.make_constraint_states(orig_len) sidx, eidx = batch_idx * orig_len, (batch_idx + 1) * orig_len this_batch_input_ids = input_ids[sidx:eidx] this_batch_token_scores = vocab_scores[sidx:eidx] full_hypotheses = torch.cat((input_ids[sent_beam_indices], sent_beam_tokens.unsqueeze(-1)), dim=-1) # need to make new hypothesis that advance the constraints track_new = { "new_seqs": full_hypotheses.tolist(), "new_states": [], "new_indices": [], "new_tokens": [], "new_scores": [], } for seq_idx, pre_seq in enumerate(this_batch_input_ids): # pre_seq = ith sequence generated before this step. # input_ids -> (topk) generic beam search best model next tokens # -> (advance) constraints forcing the next token # either way, we need to sort them into "banks" later, so store a "ConstraintListState" for all types of # hypotheses. topk_state = topk_contraint_states[seq_idx] topk_state.reset(full_hypotheses[seq_idx].cpu().tolist()) advance_state = advance_constraint_states[seq_idx] advance_state.reset(pre_seq.cpu().tolist()) if not advance_state.completed: advance_tokens = torch.LongTensor(advance_state.advance()).to(device) for advance_token in advance_tokens: # since adding each `advance_token` leads to a different hypothesis, create new state instance. new_state = advance_state.copy(stateful=True) new_state.add(advance_token.cpu().tolist()) advance_seq = torch.cat((pre_seq, advance_token.unsqueeze(0)), -1).cpu().tolist() if advance_seq not in track_new["new_seqs"]: # prevent duplicates, which are basically bound to happen in this process. track_new["new_seqs"].append(advance_seq) track_new["new_indices"].append(sidx + seq_idx) # idx -> global idx across all the batches track_new["new_tokens"].append(advance_token) track_new["new_scores"].append(this_batch_token_scores[seq_idx].take(advance_token)) track_new["new_states"].append(new_state) elif push_progress: # Basically, `sent_beam_indices` often chooses very little among `input_ids` the generated sequences that # actually fulfill our constraints. For example, let constraints == ["loves pies"] and # pre_seq_1 = "The child loves pies and" pre_seq_2 = "The child plays in the playground and" # Without this step, if `sent_beam_indices` is something like [1,1], then # 1. `pre_seq_1` won't be added to the list of (topk) hypothesis since it's not in the indices and # 2. it won't be added to the list of (advance) hypothesis since it's completed already. (this is # the else part of `if constraints_completed[seq_idx]`) # 3. it ends up simply getting removed from consideration. # #3 might be fine and actually desired, since it's likely that it's a low-probability output anyways, # especially if it's not in the list of `sent_beam_indices`. But this often leads to lengthened beam # search times, since completed sequences keep getting removed after all this effort for constrained # generation. # Here, we basically take `pre_seq_1` and to "push" it into the considered list of hypotheses, by simply # appending the next likely token in the vocabulary and adding it to the list of hypotheses. 
new_score, new_token = torch.max(this_batch_token_scores[seq_idx], 0) # some next probable token advance_seq = torch.cat((pre_seq, new_token.unsqueeze(0)), -1) advance_state = advance_constraint_states[seq_idx] advance_seq = advance_seq.cpu().tolist() advance_state.reset(advance_seq) if advance_seq not in track_new["new_seqs"]: # but still don't want to have duplicates track_new["new_seqs"].append(advance_seq) track_new["new_indices"].append(seq_idx) track_new["new_tokens"].append(new_token) track_new["new_scores"].append(new_score) track_new["new_states"].append(advance_state) if len(track_new["new_indices"]) > 0: new_indices = torch.tensor(track_new["new_indices"]).to(device) new_tokens = torch.stack(track_new["new_tokens"]).to(device) new_scores = torch.stack(track_new["new_scores"]).to(device) all_states = topk_contraint_states + track_new["new_states"] all_tokens = torch.cat((sent_beam_tokens, new_tokens), -1) all_scores = torch.cat((sent_beam_scores, new_scores), -1) all_banks = torch.tensor([one.get_bank() for one in all_states]).to(device) zipped = all_banks * 100 + all_scores indices = zipped.sort(descending=True).indices sorted_banks = all_banks[indices] # Then we end up with {sorted among bank C}, {sorted among bank C-1}, ..., {sorted among bank 0} counter = -1 cur_bank = sorted_banks[0] increments = [] for bank in sorted_banks: if bank == cur_bank: counter += 1 else: counter = 0 cur_bank = bank increments.append(counter) rearrangers = torch.tensor(np.argsort(increments, kind="mergesort")) indices = indices[rearrangers][:orig_len] sent_beam_scores = all_scores[indices] sent_beam_tokens = all_tokens[indices] sent_beam_indices = torch.cat((sent_beam_indices, new_indices))[indices] return sent_beam_scores, sent_beam_tokens, sent_beam_indices def finalize( self, input_ids: torch.LongTensor, final_beam_scores: torch.FloatTensor, final_beam_tokens: torch.LongTensor, final_beam_indices: torch.LongTensor, max_length: int, pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, ) -> Tuple[torch.LongTensor]: batch_size = len(self._beam_hyps) if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] # finalize all open beam hypotheses and add to generated hypotheses for batch_idx, beam_hyp in enumerate(self._beam_hyps): if self._done[batch_idx]: continue # all open beam hypotheses are added to the beam hypothesis # beam hypothesis class automatically keeps the best beams ids_collect = [] for beam_id in range(self.num_beams): batch_beam_idx = batch_idx * self.num_beams + beam_id final_score = final_beam_scores[batch_beam_idx].item() final_tokens = input_ids[batch_beam_idx] completes_constraint = self.check_completes_constraints(final_tokens.cpu().tolist()) if completes_constraint: beam_hyp.add(final_tokens, final_score) ids_collect.append(beam_id) # due to overly complex constraints or other factors, sometimes we can't gaurantee a successful # generation. In these cases we simply return the highest scoring outputs. 
if len(ids_collect) < self.num_beam_hyps_to_keep: for beam_id in range(self.num_beams): if beam_id not in ids_collect: batch_beam_idx = batch_idx * self.num_beams + beam_id final_score = final_beam_scores[batch_beam_idx].item() final_tokens = input_ids[batch_beam_idx] beam_hyp.add(final_tokens, final_score) if len(ids_collect) >= self.num_beam_hyps_to_keep: break # select the best hypotheses sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep) best = [] best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32) # retrieve best hypotheses for i, beam_hyp in enumerate(self._beam_hyps): sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0]) for j in range(self.num_beam_hyps_to_keep): best_hyp_tuple = sorted_hyps.pop() best_score = best_hyp_tuple[0] best_hyp = best_hyp_tuple[1] sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp) # append to lists best.append(best_hyp) best_scores[i * self.num_beam_hyps_to_keep + j] = best_score # prepare for adding eos sent_lengths_max = sent_lengths.max().item() + 1 sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) # shorter batches are padded if needed if sent_lengths.min().item() != sent_lengths.max().item(): if pad_token_id is None: raise ValueError("`pad_token_id` has to be defined") decoded.fill_(pad_token_id) # fill with hypotheses and eos_token_id if the latter fits in for i, hypo in enumerate(best): decoded[i, : sent_lengths[i]] = hypo if sent_lengths[i] < sent_max_len: # inserting only the first eos_token_id decoded[i, sent_lengths[i]] = eos_token_id[0] return UserDict( { "sequences": decoded, "sequence_scores": best_scores, } ) class BeamHypotheses: def __init__(self, num_beams: int, length_penalty: float, early_stopping: bool, max_length: Optional[int] = None): """ Initialize n-best list of hypotheses. """ self.length_penalty = length_penalty self.early_stopping = early_stopping self.max_length = max_length self.num_beams = num_beams self.beams = [] self.worst_score = 1e9 if not isinstance(self.early_stopping, bool) and self.max_length is None: raise ValueError( "When `do_early_stopping` is set to a string, `max_length` must be defined. Ensure it is passed to the" " BeamScorer class instance at initialization time." ) def __len__(self): """ Number of hypotheses in the list. """ return len(self.beams) def add(self, hyp: torch.LongTensor, sum_logprobs: float, beam_indices: Optional[torch.LongTensor] = None): """ Add a new hypothesis to the list. """ score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty) if len(self) < self.num_beams or score > self.worst_score: self.beams.append((score, hyp, beam_indices)) if len(self) > self.num_beams: sorted_next_scores = sorted([(s, idx) for idx, (s, _, _) in enumerate(self.beams)]) del self.beams[sorted_next_scores[0][1]] self.worst_score = sorted_next_scores[1][0] else: self.worst_score = min(score, self.worst_score) def is_done(self, best_sum_logprobs: float, cur_len: int) -> bool: """ If there are enough hypotheses and none of the hypotheses being generated can become better than the worst one in the heap, then we are done with this sentence. 
""" if len(self) < self.num_beams: return False # `True`: stop as soon as at least `num_beams` hypotheses are finished if self.early_stopping is True: return True # `False`: heuristic -- compute best possible score from `cur_len`, even though it is not entirely accurate # when `length_penalty` is positive. See the discussion below for more details. # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 elif self.early_stopping is False: highest_attainable_score = best_sum_logprobs / cur_len**self.length_penalty ret = self.worst_score >= highest_attainable_score return ret # `"never"`: compute the best possible score, depending on the sign of `length_penalty` else: # `length_penalty` > 0.0 -> max denominator is obtained from `max_length`, not from `cur_len` -> min # abs(`highest_attainable_score`) is obtained -> `highest_attainable_score` is negative, hence we obtain # its max this way if self.length_penalty > 0.0: highest_attainable_score = best_sum_logprobs / self.max_length**self.length_penalty # the opposite logic applies here (max `highest_attainable_score` from `cur_len`) else: highest_attainable_score = best_sum_logprobs / cur_len**self.length_penalty ret = self.worst_score >= highest_attainable_score return ret
45,955
47.527983
127
py
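The `ConstrainedBeamSearchScorer` and `BeamHypotheses` logic in `beam_search.py` above is normally not driven by hand: `generate()` builds the scorer whenever `force_words_ids` or `constraints` are supplied, and the bank-sorting in `step_sentence_constraint` then decides which beams survive each step. A minimal usage sketch; the `t5-small` checkpoint and the forced word are illustrative placeholders, not taken from the source above:

```python
# Sketch: exercising constrained beam search indirectly through `generate()`.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

inputs = tokenizer("translate English to German: The house is wonderful.", return_tensors="pt")

# `force_words_ids` is turned into PhrasalConstraint objects internally, which the
# ConstrainedBeamSearchScorer then tracks bank by bank during decoding.
force_words_ids = tokenizer(["wunderbar"], add_special_tokens=False).input_ids

outputs = model.generate(
    **inputs,
    force_words_ids=force_words_ids,
    num_beams=4,  # constrained decoding only runs with beam search (num_beams > 1)
    max_new_tokens=32,
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```

Because `finalize` only keeps hypotheses that satisfy every constraint (falling back to the highest-scoring beams otherwise), the forced phrase appears in the output whenever it fits within the length budget.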
transformers
transformers-main/src/transformers/generation/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available _import_structure = { "configuration_utils": ["GenerationConfig"], "streamers": ["TextIteratorStreamer", "TextStreamer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["beam_constraints"] = [ "Constraint", "ConstraintListState", "DisjunctiveConstraint", "PhrasalConstraint", ] _import_structure["beam_search"] = [ "BeamHypotheses", "BeamScorer", "BeamSearchScorer", "ConstrainedBeamSearchScorer", ] _import_structure["logits_process"] = [ "EpsilonLogitsWarper", "EtaLogitsWarper", "ForcedBOSTokenLogitsProcessor", "ForcedEOSTokenLogitsProcessor", "HammingDiversityLogitsProcessor", "InfNanRemoveLogitsProcessor", "LogitsProcessor", "LogitsProcessorList", "LogitsWarper", "MinLengthLogitsProcessor", "MinNewTokensLengthLogitsProcessor", "NoBadWordsLogitsProcessor", "NoRepeatNGramLogitsProcessor", "PrefixConstrainedLogitsProcessor", "RepetitionPenaltyLogitsProcessor", "SequenceBiasLogitsProcessor", "EncoderRepetitionPenaltyLogitsProcessor", "TemperatureLogitsWarper", "TopKLogitsWarper", "TopPLogitsWarper", "TypicalLogitsWarper", "EncoderNoRepeatNGramLogitsProcessor", "ExponentialDecayLengthPenalty", "LogitNormalization", ] _import_structure["stopping_criteria"] = [ "MaxNewTokensCriteria", "MaxLengthCriteria", "MaxTimeCriteria", "StoppingCriteria", "StoppingCriteriaList", "validate_stopping_criteria", ] _import_structure["utils"] = [ "GenerationMixin", "top_k_top_p_filtering", "GreedySearchEncoderDecoderOutput", "GreedySearchDecoderOnlyOutput", "SampleEncoderDecoderOutput", "SampleDecoderOnlyOutput", "BeamSearchEncoderDecoderOutput", "BeamSearchDecoderOnlyOutput", "BeamSampleEncoderDecoderOutput", "BeamSampleDecoderOnlyOutput", "ContrastiveSearchEncoderDecoderOutput", "ContrastiveSearchDecoderOnlyOutput", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tf_logits_process"] = [ "TFForcedBOSTokenLogitsProcessor", "TFForcedEOSTokenLogitsProcessor", "TFLogitsProcessor", "TFLogitsProcessorList", "TFLogitsWarper", "TFMinLengthLogitsProcessor", "TFNoBadWordsLogitsProcessor", "TFNoRepeatNGramLogitsProcessor", "TFRepetitionPenaltyLogitsProcessor", "TFTemperatureLogitsWarper", "TFTopKLogitsWarper", "TFTopPLogitsWarper", "TFForceTokensLogitsProcessor", "TFSuppressTokensAtBeginLogitsProcessor", "TFSuppressTokensLogitsProcessor", ] _import_structure["tf_utils"] = [ "TFGenerationMixin", "tf_top_k_top_p_filtering", "TFGreedySearchDecoderOnlyOutput", "TFGreedySearchEncoderDecoderOutput", "TFSampleEncoderDecoderOutput", "TFSampleDecoderOnlyOutput", "TFBeamSearchEncoderDecoderOutput", "TFBeamSearchDecoderOnlyOutput", "TFBeamSampleEncoderDecoderOutput", "TFBeamSampleDecoderOnlyOutput", 
"TFContrastiveSearchEncoderDecoderOutput", "TFContrastiveSearchDecoderOnlyOutput", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["flax_logits_process"] = [ "FlaxForcedBOSTokenLogitsProcessor", "FlaxForcedEOSTokenLogitsProcessor", "FlaxLogitsProcessor", "FlaxLogitsProcessorList", "FlaxLogitsWarper", "FlaxMinLengthLogitsProcessor", "FlaxTemperatureLogitsWarper", "FlaxTopKLogitsWarper", "FlaxTopPLogitsWarper", ] _import_structure["flax_utils"] = [ "FlaxGenerationMixin", "FlaxGreedySearchOutput", "FlaxSampleOutput", "FlaxBeamSearchOutput", ] if TYPE_CHECKING: from .configuration_utils import GenerationConfig from .streamers import TextIteratorStreamer, TextStreamer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .beam_constraints import Constraint, ConstraintListState, DisjunctiveConstraint, PhrasalConstraint from .beam_search import BeamHypotheses, BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer from .logits_process import ( EncoderNoRepeatNGramLogitsProcessor, EncoderRepetitionPenaltyLogitsProcessor, EpsilonLogitsWarper, EtaLogitsWarper, ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitNormalization, LogitsProcessor, LogitsProcessorList, LogitsWarper, MinLengthLogitsProcessor, MinNewTokensLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, SequenceBiasLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, TypicalLogitsWarper, ) from .stopping_criteria import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteria, StoppingCriteriaList, validate_stopping_criteria, ) from .utils import ( BeamSampleDecoderOnlyOutput, BeamSampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput, BeamSearchEncoderDecoderOutput, ContrastiveSearchDecoderOnlyOutput, ContrastiveSearchEncoderDecoderOutput, GenerationMixin, GreedySearchDecoderOnlyOutput, GreedySearchEncoderDecoderOutput, SampleDecoderOnlyOutput, SampleEncoderDecoderOutput, top_k_top_p_filtering, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tf_logits_process import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessor, TFLogitsProcessorList, TFLogitsWarper, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) from .tf_utils import ( TFBeamSampleDecoderOnlyOutput, TFBeamSampleEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput, TFBeamSearchEncoderDecoderOutput, TFContrastiveSearchDecoderOnlyOutput, TFContrastiveSearchEncoderDecoderOutput, TFGenerationMixin, TFGreedySearchDecoderOnlyOutput, TFGreedySearchEncoderDecoderOutput, TFSampleDecoderOnlyOutput, TFSampleEncoderDecoderOutput, tf_top_k_top_p_filtering, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .flax_logits_process import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessor, 
FlaxLogitsProcessorList, FlaxLogitsWarper, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) from .flax_utils import FlaxBeamSearchOutput, FlaxGenerationMixin, FlaxGreedySearchOutput, FlaxSampleOutput else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
9,515
33.603636
119
py
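Every name in `_import_structure` above is exposed through `_LazyModule`, so the framework-specific submodules are only imported when one of their attributes is first accessed. A small sketch of what that means for user code, assuming PyTorch is installed (the torch-gated names are unavailable otherwise); the final identity check is an expectation based on the re-export, not something asserted by the file itself:

```python
# Sketch: attribute access on the lazy module triggers the real submodule import.
import transformers.generation as generation

config_cls = generation.GenerationConfig              # imports configuration_utils on first access
processor_list_cls = generation.LogitsProcessorList   # torch-gated name from logits_process

# The same classes are also re-exported from the package root.
from transformers import GenerationConfig

print(config_cls is GenerationConfig)  # expected True: both resolve to the same class object
```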
transformers
transformers-main/src/transformers/generation/stopping_criteria.py
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging logger = logging.get_logger(__name__) STOPPING_CRITERIA_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. """ class StoppingCriteria(ABC): """Abstract base class for all stopping criteria that can be applied during generation.""" @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed") class MaxLengthCriteria(StoppingCriteria): """ This class can be used to stop generation whenever the full generated number of tokens exceeds `max_length`. Keep in mind for decoder-only type of transformers, this will include the initial prompted tokens. Args: max_length (`int`): The maximum length that the output sequence can have in number of tokens. max_position_embeddings (`int`, `optional`): The maximum model length, as defined by the model's `config.max_position_embeddings` attribute. """ def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None): self.max_length = max_length self.max_position_embeddings = max_position_embeddings @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: cur_len = input_ids.shape[-1] is_done = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe " "exceptions, performance degradation, or nothing at all." ) return is_done class MaxNewTokensCriteria(StoppingCriteria): """ This class can be used to stop generation whenever the generated number of tokens exceeds `max_new_tokens`. Keep in mind for decoder-only type of transformers, this will **not** include the initial prompted tokens. This is very close to `MaxLengthCriteria` but ignores the number of initial tokens. Args: start_length (`int`): The number of initial tokens. max_new_tokens (`int`): The maximum number of tokens to generate. """ def __init__(self, start_length: int, max_new_tokens: int): warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. 
" f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` " "with `max_length = start_length + max_new_tokens` instead.", FutureWarning, ) self.start_length = start_length self.max_new_tokens = max_new_tokens self.max_length = start_length + max_new_tokens @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: return input_ids.shape[-1] >= self.max_length class MaxTimeCriteria(StoppingCriteria): """ This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the time will start being counted when you initialize this function. You can override this by passing an `initial_time`. Args: max_time (`float`): The maximum allowed time in seconds for the generation. initial_time (`float`, *optional*, defaults to `time.time()`): The start of the generation allowed time. """ def __init__(self, max_time: float, initial_timestamp: Optional[float] = None): self.max_time = max_time self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: return time.time() - self.initial_timestamp > self.max_time class StoppingCriteriaList(list): @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: return any(criteria(input_ids, scores) for criteria in self) @property def max_length(self) -> Optional[int]: for stopping_criterium in self: if isinstance(stopping_criterium, MaxLengthCriteria): return stopping_criterium.max_length elif isinstance(stopping_criterium, MaxNewTokensCriteria): return stopping_criterium.max_length return None def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList: stopping_max_length = stopping_criteria.max_length new_stopping_criteria = deepcopy(stopping_criteria) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length)) return new_stopping_criteria
6,373
42.360544
119
py
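The criteria defined in `stopping_criteria.py` above can be combined in a `StoppingCriteriaList` and passed straight to `generate()`. A short sketch; the `gpt2` checkpoint, the prompt, and the exact budgets are placeholders chosen for illustration:

```python
# Sketch: combining stopping criteria; generation halts when *any* of them returns True.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("The stopping criteria API", return_tensors="pt")

criteria = StoppingCriteriaList(
    [
        MaxLengthCriteria(max_length=40),  # counts prompt tokens plus generated tokens
        MaxTimeCriteria(max_time=2.0),     # wall-clock budget in seconds
    ]
)

outputs = model.generate(**inputs, stopping_criteria=criteria, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Since `StoppingCriteriaList.__call__` uses `any(...)`, decoding stops as soon as either the length or the time budget is exhausted, whichever comes first.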
transformers
transformers-main/src/transformers/generation/flax_logits_process.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger logger = get_logger(__name__) LOGITS_PROCESSOR_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class FlaxLogitsProcessor: """Abstract base class for all logit processors that can be applied during generation.""" @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray: """Flax method for processing logits.""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class FlaxLogitsWarper: """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray: """Flax method for warping logits.""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class FlaxLogitsProcessorList(list): """ This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently process a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to the inputs. """ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray: for processor in self: function_args = inspect.signature(processor.__call__).parameters if len(function_args) > 3: if not all(arg in kwargs for arg in list(function_args.keys())[2:]): raise ValueError( f"Make sure that all the required parameters: {list(function_args.keys())} for " f"{processor.__class__} are passed to the logits processor." ) scores = processor(input_ids, scores, cur_len, **kwargs) else: scores = processor(input_ids, scores, cur_len) return scores class FlaxTemperatureLogitsWarper(FlaxLogitsWarper): r""" [`FlaxLogitsWarper`] for temperature (exponential scaling output probability distribution). 
Args: temperature (`float`): The value used to module the logits distribution. """ def __init__(self, temperature: float): if not isinstance(temperature, float) or not (temperature > 0): raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}") self.temperature = temperature def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: scores = scores / self.temperature return scores class FlaxTopPLogitsWarper(FlaxLogitsWarper): """ [`FlaxLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off. Args: top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. """ def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0): raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1): raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") self.top_p = top_p self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1]) mask_scores = jnp.full_like(scores, self.filter_value) cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1) score_mask = cumulative_probs < self.top_p # include the token that is higher than top_p as well score_mask = jnp.roll(score_mask, 1) score_mask |= score_mask.at[:, 0].set(True) # min tokens to keep score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True) topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores) next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1] return next_scores class FlaxTopKLogitsWarper(FlaxLogitsWarper): r""" [`FlaxLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. Args: top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. 
""" def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_k, int) or top_k <= 0: raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") self.top_k = max(top_k, min_tokens_to_keep) self.filter_value = filter_value def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: batch_size, vocab_size = scores.shape next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value) topk = min(self.top_k, scores.shape[-1]) # Safety check topk_scores, topk_indices = lax.top_k(scores, topk) shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten() topk_scores_flat = topk_scores.flatten() topk_indices_flat = topk_indices.flatten() + shift next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat) next_scores = next_scores_flat.reshape(batch_size, vocab_size) return next_scores class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] that enforces the specified token as the first generated token. Args: bos_token_id (`int`): The id of the token to force as the first generated token. """ def __init__(self, bos_token_id: int): self.bos_token_id = bos_token_id def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: new_scores = jnp.full(scores.shape, -float("inf")) apply_penalty = 1 - jnp.bool_(cur_len - 1) scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores) return scores class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached. Args: max_length (`int`): The maximum length of the sequence to be generated. eos_token_id (`int`): The id of the token to force as the last generated token when `max_length` is reached. """ def __init__(self, max_length: int, eos_token_id: int): self.max_length = max_length self.eos_token_id = eos_token_id def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: new_scores = jnp.full(scores.shape, -float("inf")) apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1) scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores) return scores class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] enforcing a min-length by setting EOS probability to 0. Args: min_length (`int`): The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`. eos_token_id (`int`): The id of the *end-of-sequence* token. 
""" def __init__(self, min_length: int, eos_token_id: int): if not isinstance(min_length, int) or min_length < 0: raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}") if not isinstance(eos_token_id, int) or eos_token_id < 0: raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}") self.min_length = min_length self.eos_token_id = eos_token_id def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: # create boolean flag to decide if min length penalty should be applied apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1) scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores) return scores class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] supressing a list of tokens as soon as the `generate` function starts generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not sampled at the begining of the generation. Args: begin_suppress_tokens (`List[int]`): Tokens to not sample. begin_index (`int`): Index where the tokens are suppressed. """ def __init__(self, begin_suppress_tokens, begin_index): self.begin_suppress_tokens = list(begin_suppress_tokens) self.begin_index = begin_index def __call__(self, input_ids, scores, cur_len: int): apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index) scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores) return scores class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step. The processor will set their log probs to be `-inf` so they are not sampled. Args: suppress_tokens (`list`): Tokens to not sample. """ def __init__(self, suppress_tokens: list): self.suppress_tokens = list(suppress_tokens) def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: scores = scores.at[..., self.suppress_tokens].set(-float("inf")) return scores class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] that takes a list of pairs of integers which indicates a mapping from generation indices to token indices that will be forced before sampling. The processor will set their log probs to 0 and all other tokens to `-inf` so that they are sampled at their corresponding index. Args: force_token_map (`list`): Map giving token ids and indices where they will be forced to be sampled. """ def __init__(self, force_token_map): force_token_map = dict(force_token_map) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1 for index, token in force_token_map.items(): if token is not None: force_token_array = force_token_array.at[index].set(token) self.force_token_array = jnp.int32(force_token_array) def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: def _force_token(generation_idx): batch_size = scores.shape[0] current_token = self.force_token_array[generation_idx] new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf") updates = jnp.zeros((batch_size, 1), dtype=scores.dtype) new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token)) return new_scores scores = lax.cond( cur_len >= self.force_token_array.shape[0], # If the current length is geq than the length of force_token_array, the processor does nothing. lambda: scores, # Otherwise, it may force a certain token. lambda: lax.cond( self.force_token_array[cur_len] >= 0, # Only valid (positive) tokens are forced lambda: _force_token(cur_len), # Otherwise, the processor does nothing. lambda: scores, ), ) return scores class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor): r""" Whisper specific Processor. This processor can be used to force a list of tokens. The processor will set their log probs to `inf` so that they are sampled at their corresponding index. Args: generate_config (`GenerateConfig`): The generate config used to generate the output. The following parameters are required: eos_token_id (`int`, *optional*, defaults to 50257): The id of the *end-of-sequence* token. no_timestamps_token_id (`int`, *optional*, defaults to 50363): The id of the `"<|notimestamps|>"` token. max_initial_timestamp_index (`int`, *optional*, defaults to 1): Used to set the maximum value of the initial timestamp. This is used to prevent the model from predicting timestamps that are too far in the future. 
""" def __init__(self, generate_config, model_config, decoder_input_length): self.eos_token_id = generate_config.eos_token_id self.no_timestamps_token_id = generate_config.no_timestamps_token_id self.timestamp_begin = generate_config.no_timestamps_token_id + 1 self.begin_index = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(generate_config, "max_initial_timestamp_index"): self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index else: self.max_initial_timestamp_index = model_config.vocab_size if self.max_initial_timestamp_index is None: self.max_initial_timestamp_index = model_config.vocab_size def __call__(self, input_ids, scores, cur_len): # suppress <|notimestamps|> which is handled by without_timestamps scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf")) def handle_pairs(input_ids_k, scores_k): last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False) last_was_timestamp = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False, ) penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False) penultimate_was_timestamp = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp, ) return jnp.where( last_was_timestamp, jnp.where( penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float("inf")), scores_k.at[: self.eos_token_id].set(-float("inf")), ), scores_k, ) scores = jax.vmap(handle_pairs)(input_ids, scores) apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False) apply_max_initial_timestamp = jnp.where( self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False, ) last_allowed = self.timestamp_begin + self.max_initial_timestamp_index scores = jnp.where( apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("inf")), scores, ) # if sum of probability over timestamps is above any other token, sample timestamp logprobs = jax.nn.log_softmax(scores, axis=-1) def handle_cumulative_probs(logprobs_k, scores_k): timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1) max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin]) return jnp.where( timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float("inf")), scores_k, ) scores = jax.vmap(handle_cumulative_probs)(logprobs, scores) return scores
19,218
40.962882
119
py
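The Flax processors in `flax_logits_process.py` above are plain callables over `(input_ids, scores, cur_len)`, so they can be sanity-checked on dummy arrays without loading a model. A minimal sketch, assuming JAX and Flax are installed; the shapes and values are arbitrary:

```python
# Sketch: applying Flax warpers directly to a dummy batch of logits.
import jax.numpy as jnp

from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

batch_size, vocab_size, cur_len = 2, 10, 5
input_ids = jnp.zeros((batch_size, cur_len), dtype=jnp.int32)
scores = jnp.arange(batch_size * vocab_size, dtype=jnp.float32).reshape(batch_size, vocab_size)

warpers = FlaxLogitsProcessorList(
    [
        FlaxTemperatureLogitsWarper(temperature=0.7),  # rescales the logits
        FlaxTopKLogitsWarper(top_k=3),                 # keeps only the 3 best logits per row
    ]
)

# Each entry in the list is invoked as processor(input_ids, scores, cur_len).
warped = warpers(input_ids, scores, cur_len)
print(warped)  # everything outside the per-row top-3 is set to -inf
```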
transformers
transformers-main/src/transformers/generation/logits_process.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math from typing import Callable, Dict, Iterable, List, Tuple, Union import numpy as np import torch from ..utils import add_start_docstrings from ..utils.logging import get_logger logger = get_logger(__name__) LOGITS_PROCESSOR_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search Return: `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class LogitsProcessor: """Abstract base class for all logit processors that can be applied during generation.""" @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class LogitsWarper: """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class LogitsProcessorList(list): """ This class can be used to create a list of [`LogitsProcessor`] or [`LogitsWarper`] to subsequently process a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each [`LogitsProcessor`] or [`LogitsWarper`] to the inputs. """ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.FloatTensor: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional kwargs that are specific to a logits processor. Return: `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. 
""" for processor in self: function_args = inspect.signature(processor.__call__).parameters if len(function_args) > 2: if not all(arg in kwargs for arg in list(function_args.keys())[2:]): raise ValueError( f"Make sure that all the required parameters: {list(function_args.keys())} for " f"{processor.__class__} are passed to the logits processor." ) scores = processor(input_ids, scores, **kwargs) else: scores = processor(input_ids, scores) return scores class MinLengthLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] enforcing a min-length by setting EOS probability to 0. Args: min_length (`int`): The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`. eos_token_id (`Union[int, List[int]]`): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. """ def __init__(self, min_length: int, eos_token_id: Union[int, List[int]]): if not isinstance(min_length, int) or min_length < 0: raise ValueError(f"`min_length` has to be a non-negative integer, but is {min_length}") if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] if not all(isinstance(i, int) for i in eos_token_id) or any(i < 0 for i in eos_token_id): logger.warning(f"`eos_token_id` has to be a list of positive integers, but is {eos_token_id}") self.min_length = min_length self.eos_token_id = eos_token_id @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: cur_len = input_ids.shape[-1] if cur_len < self.min_length: for i in self.eos_token_id: scores[:, i] = -float("inf") return scores class MinNewTokensLengthLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] enforcing a min-length of new tokens by setting EOS (End-Of-Sequence) token probability to 0. Args: prompt_length_to_skip (`int`): The input tokens length. min_new_tokens (`int`): The minimum *new* tokens length below which the score of `eos_token_id` is set to `-float("Inf")`. eos_token_id (`Union[int, List[int]]`): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. """ def __init__(self, prompt_length_to_skip: int, min_new_tokens: int, eos_token_id: Union[int, List[int]]): for arg_name, arg_value in [ ("prompt_length_to_skip", prompt_length_to_skip), ("min_new_tokens", min_new_tokens), ]: if not isinstance(arg_value, int) or arg_value < 0: raise ValueError(f"`{arg_name}` has to be a positive integer, but is {arg_value}") if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] if not all(isinstance(i, int) for i in eos_token_id) or any(i < 0 for i in eos_token_id): logger.warning(f"`eos_token_id` has to be a list of positive integers, but is {eos_token_id}") self.prompt_length_to_skip = prompt_length_to_skip self.min_new_tokens = min_new_tokens self.eos_token_id = eos_token_id @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: new_tokens_length = input_ids.shape[-1] - self.prompt_length_to_skip if new_tokens_length < self.min_new_tokens: for i in self.eos_token_id: scores[:, i] = -float("inf") return scores class TemperatureLogitsWarper(LogitsWarper): r""" [`LogitsWarper`] for temperature (exponential scaling output probability distribution). Args: temperature (`float`): The value used to module the logits distribution. 
""" def __init__(self, temperature: float): if not isinstance(temperature, float) or not (temperature > 0): raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}") self.temperature = temperature @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: scores = scores / self.temperature return scores class RepetitionPenaltyLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] enforcing an exponential penalty on repeated sequences. Args: repetition_penalty (`float`): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. """ def __init__(self, penalty: float): if not isinstance(penalty, float) or not (penalty > 0): raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}") self.penalty = penalty @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: score = torch.gather(scores, 1, input_ids) # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability score = torch.where(score < 0, score * self.penalty, score / self.penalty) scores.scatter_(1, input_ids, score) return scores class EncoderRepetitionPenaltyLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] enforcing an exponential penalty on tokens that are not in the original input. Args: hallucination_penalty (`float`): The parameter for hallucination penalty. 1.0 means no penalty. encoder_input_ids (`torch.LongTensor`): The encoder_input_ids that should not be repeated within the decoder ids. """ def __init__(self, penalty: float, encoder_input_ids: torch.LongTensor): if not isinstance(penalty, float) or not (penalty > 0): raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}") self.penalty = 1 / penalty self.encoder_input_ids = encoder_input_ids @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: score = torch.gather(scores, 1, self.encoder_input_ids) # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability score = torch.where(score < 0, score * self.penalty, score / self.penalty) scores.scatter_(1, self.encoder_input_ids, score) return scores class TopPLogitsWarper(LogitsWarper): """ [`LogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off. Args: top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. 
""" def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): top_p = float(top_p) if top_p < 0 or top_p > 1.0: raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1): raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") self.top_p = top_p self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: sorted_logits, sorted_indices = torch.sort(scores, descending=False) cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) sorted_indices_to_remove = cumulative_probs <= (1 - self.top_p) # Keep at least min_tokens_to_keep sorted_indices_to_remove[..., -self.min_tokens_to_keep :] = 0 # scatter sorted tensors to original indexing indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) scores = scores.masked_fill(indices_to_remove, self.filter_value) return scores class TopKLogitsWarper(LogitsWarper): r""" [`LogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. Args: top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. """ def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_k, int) or top_k <= 0: raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") self.top_k = max(top_k, min_tokens_to_keep) self.filter_value = filter_value @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: top_k = min(self.top_k, scores.size(-1)) # Safety check # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None] scores = scores.masked_fill(indices_to_remove, self.filter_value) return scores class TypicalLogitsWarper(LogitsWarper): r""" [`LogitsWarper`] that performs typical decoding. See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information. Args: mass (`float`): Value of typical_p between 0 and 1 inclusive, defaults to 0.9. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. 
""" def __init__(self, mass: float = 0.9, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): mass = float(mass) if not (mass > 0 and mass < 1): raise ValueError(f"`typical_p` has to be a float > 0 and < 1, but is {mass}") if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1): raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") self.filter_value = filter_value self.mass = mass self.min_tokens_to_keep = min_tokens_to_keep @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: # calculate entropy normalized = torch.nn.functional.log_softmax(scores, dim=-1) p = torch.exp(normalized) ent = -(normalized * p).nansum(-1, keepdim=True) # shift and sort shifted_scores = torch.abs((-normalized) - ent) sorted_scores, sorted_indices = torch.sort(shifted_scores, descending=False) sorted_logits = scores.gather(-1, sorted_indices) cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) # Remove tokens with cumulative mass above the threshold last_ind = (cumulative_probs < self.mass).sum(dim=1) last_ind[last_ind < 0] = 0 sorted_indices_to_remove = sorted_scores > sorted_scores.gather(1, last_ind.view(-1, 1)) sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0 indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) scores = scores.masked_fill(indices_to_remove, self.filter_value) return scores class EpsilonLogitsWarper(LogitsWarper): r""" [`LogitsWarper`] that performs epsilon-sampling, i.e. restricting to tokens with `prob >= epsilon`. Takes the largest min_tokens_to_keep tokens if no tokens satisfy this constraint. See [Truncation Sampling as Language Model Desmoothing](https://arxiv.org/abs/2210.15191) for more information. Args: epsilon (`float`): If set to > 0, only the most tokens with probabilities `epsilon` or higher are kept for generation. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. """ def __init__(self, epsilon: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): epsilon = float(epsilon) if epsilon <= 0 or epsilon >= 1: raise ValueError(f"`epsilon_cutoff` has to be a float > 0 and < 1, but is {epsilon}") min_tokens_to_keep = int(min_tokens_to_keep) if min_tokens_to_keep < 1: raise ValueError( f"`min_tokens_to_keep` has to be a strictly positive integer, but is {min_tokens_to_keep}" ) self.epsilon = epsilon self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: # Determine which indices to remove probabilities = scores.softmax(dim=-1) indices_to_remove = probabilities < self.epsilon # Keep the words with the 'min_tokens_to_keep'-highest probabilities top_k = min(self.min_tokens_to_keep, scores.size(-1)) # Safety check indices_to_remove = indices_to_remove & (scores < torch.topk(scores, top_k)[0][..., -1, None]) scores = scores.masked_fill(indices_to_remove, self.filter_value) return scores class EtaLogitsWarper(LogitsWarper): r""" [`LogitsWarper`] that performs eta-sampling, i.e. 
calculates a dynamic cutoff `eta := min(epsilon, sqrt(epsilon, e^-entropy(probabilities)))` and restricts to tokens with `prob >= eta`. Takes the largest min_tokens_to_keep tokens if no tokens satisfy this constraint. See [Truncation Sampling as Language Model Desmoothing](https://arxiv.org/abs/2210.15191) for more information. Args: min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered.""" def __init__(self, epsilon: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): epsilon = float(epsilon) if epsilon <= 0 or epsilon >= 1: raise ValueError(f"`eta_cutoff` has to be a float > 0 and < 1, but is {epsilon}") min_tokens_to_keep = int(min_tokens_to_keep) if min_tokens_to_keep < 1: raise ValueError( f"`min_tokens_to_keep` has to be a strictly positive integer, but is {min_tokens_to_keep}" ) self.epsilon = torch.tensor(epsilon) self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: # Calculate the adaptive cutoff probabilities = scores.softmax(dim=-1) entropy = torch.distributions.Categorical(logits=scores).entropy() eta = torch.min(self.epsilon, torch.sqrt(self.epsilon) * torch.exp(-entropy))[..., None] indices_to_remove = probabilities < eta # Keep the words with the 'min_tokens_to_keep'-highest probabilities top_k = min(self.min_tokens_to_keep, scores.size(-1)) # Safety check indices_to_remove = indices_to_remove & (scores < torch.topk(scores, top_k)[0][..., -1, None]) scores = scores.masked_fill(indices_to_remove, self.filter_value) return scores def _get_ngrams(ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int): generated_ngrams = [{} for _ in range(num_hypos)] for idx in range(num_hypos): gen_tokens = prev_input_ids[idx].tolist() generated_ngram = generated_ngrams[idx] for ngram in zip(*[gen_tokens[i:] for i in range(ngram_size)]): prev_ngram_tuple = tuple(ngram[:-1]) generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] return generated_ngrams def _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size, cur_len): # Before decoding the next token, prevent decoding of ngrams that have already appeared start_idx = cur_len + 1 - ngram_size ngram_idx = tuple(prev_input_ids[start_idx:cur_len].tolist()) return banned_ngrams.get(ngram_idx, []) def _calc_banned_ngram_tokens( ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int, cur_len: int ) -> List[Iterable[int]]: """Copied from fairseq for no_repeat_ngram in beam_search""" if cur_len + 1 < ngram_size: # return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet return [[] for _ in range(num_hypos)] generated_ngrams = _get_ngrams(ngram_size, prev_input_ids, num_hypos) banned_tokens = [ _get_generated_ngrams(generated_ngrams[hypo_idx], prev_input_ids[hypo_idx], ngram_size, cur_len) for hypo_idx in range(num_hypos) ] return banned_tokens class NoRepeatNGramLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] that enforces no repetition of n-grams. See [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345). Args: ngram_size (`int`): All ngrams of size `ngram_size` can only occur once. 
""" def __init__(self, ngram_size: int): if not isinstance(ngram_size, int) or ngram_size <= 0: raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}") self.ngram_size = ngram_size @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: num_batch_hypotheses = scores.shape[0] cur_len = input_ids.shape[-1] banned_batch_tokens = _calc_banned_ngram_tokens(self.ngram_size, input_ids, num_batch_hypotheses, cur_len) for i, banned_tokens in enumerate(banned_batch_tokens): scores[i, banned_tokens] = -float("inf") return scores class EncoderNoRepeatNGramLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] that enforces no repetition of encoder input ids n-grams for the decoder ids. See [ParlAI](https://github.com/facebookresearch/ParlAI/blob/master/parlai/core/torch_generator_agent.py#L1350). Args: encoder_ngram_size (`int`): All ngrams of size `ngram_size` can only occur within the encoder input ids. encoder_input_ids (`int`): The encoder_input_ids that should not be repeated within the decoder ids. """ def __init__(self, encoder_ngram_size: int, encoder_input_ids: torch.LongTensor): if not isinstance(encoder_ngram_size, int) or encoder_ngram_size <= 0: raise ValueError( f"`encoder_ngram_size` has to be a strictly positive integer, but is {encoder_ngram_size}" ) self.ngram_size = encoder_ngram_size if len(encoder_input_ids.shape) == 1: encoder_input_ids = encoder_input_ids.unsqueeze(0) self.batch_size = encoder_input_ids.shape[0] self.generated_ngrams = _get_ngrams(encoder_ngram_size, encoder_input_ids, self.batch_size) @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: # B x num_beams num_hypos = scores.shape[0] num_beams = num_hypos // self.batch_size cur_len = input_ids.shape[-1] banned_batch_tokens = [ _get_generated_ngrams( self.generated_ngrams[hypo_idx // num_beams], input_ids[hypo_idx], self.ngram_size, cur_len ) for hypo_idx in range(num_hypos) ] for i, banned_tokens in enumerate(banned_batch_tokens): scores[i, banned_tokens] = -float("inf") return scores class SequenceBiasLogitsProcessor(LogitsProcessor): """ [`LogitsProcessor`] that applies an additive bias on sequences. The bias is applied to the last token of a sequence when the next generated token can complete it. Consequently, to take the most of biasing sequences with more than one token, consider using beam methods (to gracefully work around partially completed sequences that have a negative bias) and applying the bias to their prefixes (to ensure the bias is applied earlier). <Tip> In order to get the token ids of the sequences that you want to bias, make sure to set `add_prefix_space=True` when initializing the tokenizer, and use `tokenizer(bad_words, add_special_tokens=False).input_ids`. The `add_prefix_space` argument is only supported for some slow tokenizers, as fast tokenizers' prefixing behaviours come from `pre tokenizers`. Read more [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers). </Tip> Args: sequence_bias (`Dict[Tuple[int], float]`): Dictionary that maps a sequence of tokens to its bias term. Positive biases increase the odds of the sequence being selected, while negative biases do the opposite. If a sequence has a length of 1, its bias will always be applied. 
Otherwise, the bias will only be applied if the sequence in question is about to be completed (in the token selection step after this processor is applied). Examples: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> inputs = tokenizer(["The full name of Donald is Donald"], return_tensors="pt") >>> summary_ids = model.generate(inputs["input_ids"], max_new_tokens=4) >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0]) The full name of Donald is Donald J. Trump Jr >>> # Now let's control generation through a bias. Please note that the tokenizer is initialized differently! >>> tokenizer_with_prefix_space = AutoTokenizer.from_pretrained("gpt2", add_prefix_space=True) >>> def get_tokens_as_tuple(word): ... return tuple(tokenizer_with_prefix_space([word], add_special_tokens=False).input_ids[0]) >>> # If we add a negative bias without beam search, it may become "stuck" in a prefix without good continuations >>> sequence_bias = {get_tokens_as_tuple("Trump"): -10.0} >>> biased_ids = model.generate(inputs["input_ids"], max_new_tokens=4, sequence_bias=sequence_bias) >>> print(tokenizer.batch_decode(biased_ids, skip_special_tokens=True)[0]) The full name of Donald is Donald J. Donald, >>> biased_ids = model.generate(inputs["input_ids"], max_new_tokens=4, num_beams=4, sequence_bias=sequence_bias) >>> print(tokenizer.batch_decode(biased_ids, skip_special_tokens=True)[0]) The full name of Donald is Donald Rumsfeld, >>> # We can also add a positive bias to nudge the model towards specific tokens or continuations >>> sequence_bias = {get_tokens_as_tuple("Donald Duck"): 10.0} >>> biased_ids = model.generate(inputs["input_ids"], max_new_tokens=4, num_beams=4, sequence_bias=sequence_bias) >>> print(tokenizer.batch_decode(biased_ids, skip_special_tokens=True)[0]) The full name of Donald is Donald Duck. ``` """ def __init__(self, sequence_bias: Dict[Tuple[int], float]): self.sequence_bias = sequence_bias self._validate_arguments() # Bias variables that will be populated on the first call (for retrocompatibility purposes, the vocabulary size # is infered in the first usage, which inhibits initializing here) self.sequences_length_greater_than_1 = [] self.length_1_bias = None self.length_greather_than_1_bias = None self.prepared_bias_variables = False @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: # 1 - Prepares the bias tensors. This is only needed the first time the logit processor is called. if not self.prepared_bias_variables: self._prepare_bias_variables(scores) # 2 - prepares an empty bias to add bias = torch.zeros_like(scores) # 3 - include the bias from length = 1 bias += self.length_1_bias # 4 - include the bias from length > 1, after determining which biased sequences may be completed. # `matching_mask` is a (batch_size, vocab_size) boolean mask that is True for all tokens whose corresponding # bias should be applied. The bias is applied on the last token of the sequence, if (and only if) the sequence # may become complete this iteration. 
matching_mask = torch.zeros_like(scores, dtype=torch.bool) for sequence_ids in self.sequences_length_greater_than_1: if len(sequence_ids) > input_ids.shape[1]: # the sequence is longer than the context, ignore continue prefix_length = len(sequence_ids) - 1 last_token = sequence_ids[-1] matching_rows = torch.eq( input_ids[:, -prefix_length:], torch.tensor(sequence_ids[:-1], dtype=input_ids.dtype, device=input_ids.device), ).prod(dim=1) matching_mask[:, last_token] |= matching_rows.bool() bias += torch.where( matching_mask, self.length_greather_than_1_bias, torch.tensor(0.0, device=self.length_greather_than_1_bias.device), ) # 5 - apply the bias to the scores scores = scores + bias return scores def _prepare_bias_variables(self, scores: torch.FloatTensor): vocabulary_size = scores.shape[-1] sequence_bias = self.sequence_bias tokens_with_bias = [] # Check biased tokens out of bounds invalid_biases = [] for sequence_ids in sequence_bias: for token_id in sequence_ids: if token_id >= vocabulary_size: invalid_biases.append(token_id) if len(invalid_biases) > 0: raise ValueError( f"The model vocabulary size is {vocabulary_size}, but the following tokens were being biased: " f"{invalid_biases}" ) # Precompute the bias tensors to be applied. Sequences of length 1 are kept separately, as they can be applied # with simpler logic. self.length_1_bias = torch.zeros((vocabulary_size,), dtype=torch.float).to(scores.device) self.length_greather_than_1_bias = torch.zeros((vocabulary_size,), dtype=torch.float).to(scores.device) for sequence_ids, bias in sequence_bias.items(): if len(sequence_ids) == 1: self.length_1_bias[sequence_ids[-1]] = bias else: self.sequences_length_greater_than_1.append(sequence_ids) if self.length_greather_than_1_bias[sequence_ids[-1]] != 0.0: raise ValueError( "Setting a bias on sequences that share a common token termination is not yet supported. " "Please open an issue if you see this error message (after checking that it doesn't already " "exist)." ) self.length_greather_than_1_bias[sequence_ids[-1]] = bias tokens_with_bias.append(sequence_ids[-1]) self.prepared_bias_variables = True def _validate_arguments(self): sequence_bias = self.sequence_bias if not isinstance(sequence_bias, dict) or len(sequence_bias) == 0: raise ValueError(f"`sequence_bias` has to be a non-empty dictionary, but is {sequence_bias}.") if any(not isinstance(sequence_ids, tuple) for sequence_ids in sequence_bias.keys()): raise ValueError(f"`sequence_bias` has to be a dict with tuples as keys, but is {sequence_bias}.") if any( any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in sequence_ids) or len(sequence_ids) == 0 for sequence_ids in sequence_bias.keys() ): raise ValueError( f"Each key in `sequence_bias` has to be a non-empty tuple of positive integers, but is " f"{sequence_bias}." ) if any(not isinstance(bias, float) for bias in sequence_bias.values()): raise ValueError(f"`sequence_bias` has to be a dict with floats as values, but is {sequence_bias}.") class NoBadWordsLogitsProcessor(SequenceBiasLogitsProcessor): """ [`LogitsProcessor`] that enforces that specified sequences will never be selected. <Tip> In order to get the token ids of the words that should not appear in the generated text, make sure to set `add_prefix_space=True` when initializing the tokenizer, and use `tokenizer(bad_words, add_special_tokens=False).input_ids`. The `add_prefix_space` argument is only supported for some slow tokenizers, as fast tokenizers' prefixing behaviours come from `pre tokenizers`. 
Read more [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers). </Tip> Args: bad_words_ids (`List[List[int]]`): List of list of token ids that are not allowed to be generated. eos_token_id (`Union[int, List[int]]`): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. """ def __init__(self, bad_words_ids: List[List[int]], eos_token_id: Union[int, List[int]]): self.bad_word_ids = bad_words_ids self._validate_arguments() # Filter EOS token from bad_words_ids if eos_token_id is None: eos_token_id = [] if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] bad_words_ids = list( filter(lambda bad_token_seq: all(bad_token_seq != [i] for i in eos_token_id), bad_words_ids) ) # Forbidding a sequence is equivalent to setting its bias to -inf sequence_bias = {tuple(sequence): float("-inf") for sequence in bad_words_ids} super().__init__(sequence_bias=sequence_bias) def _validate_arguments(self): bad_words_ids = self.bad_word_ids if not isinstance(bad_words_ids, list) or len(bad_words_ids) == 0: raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.") if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids): raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.") if any( any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids) for bad_word_ids in bad_words_ids ): raise ValueError( f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}." ) class PrefixConstrainedLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] that enforces constrained generation and is useful for prefix-conditioned constrained generation. See [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904) for more information. Args: prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`): This function constraints the beam search to allowed tokens only at each step. This function takes 2 arguments `inputs_ids` and the batch ID `batch_id`. It has to return a list with the allowed tokens for the next generation step conditioned on the previously generated tokens `inputs_ids` and the batch ID `batch_id`. """ def __init__(self, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int): self._prefix_allowed_tokens_fn = prefix_allowed_tokens_fn self._num_beams = num_beams @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: mask = torch.full_like(scores, -math.inf) for batch_id, beam_sent in enumerate(input_ids.view(-1, self._num_beams, input_ids.shape[-1])): for beam_id, sent in enumerate(beam_sent): mask[batch_id * self._num_beams + beam_id, self._prefix_allowed_tokens_fn(batch_id, sent)] = 0 return scores + mask class HammingDiversityLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] that enforces diverse beam search. Note that this logits processor is only effective for [`PreTrainedModel.group_beam_search`]. See [Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models](https://arxiv.org/pdf/1610.02424.pdf) for more details. Args: diversity_penalty (`float`): This value is subtracted from a beam's score if it generates a token same as any beam from other group at a particular time. Note that `diversity_penalty` is only effective if `group beam search` is enabled. num_beams (`int`): Number of beams used for group beam search. 
See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. num_beam_groups (`int`): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. """ def __init__(self, diversity_penalty: float, num_beams: int, num_beam_groups: int): if not isinstance(diversity_penalty, float) or (not diversity_penalty > 0.0): raise ValueError("`diversity_penalty` should be a float strictly larger than 0.") self._diversity_penalty = diversity_penalty if not isinstance(num_beams, int) or num_beams < 2: raise ValueError("`num_beams` should be an integer strictly larger than 1.") self._num_beams = num_beams if not isinstance(num_beam_groups, int) or num_beam_groups < 2: raise ValueError("`num_beam_groups` should be an integer strictly larger than 1.") if num_beam_groups > num_beams: raise ValueError("`beam_groups` has to be smaller or equal to `num_beams`.") self._num_sub_beams = num_beams // num_beam_groups def __call__( self, input_ids: torch.LongTensor, scores: torch.FloatTensor, current_tokens: torch.LongTensor, beam_group_idx: int, ) -> torch.FloatTensor: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search current_tokens (`torch.LongTensor` of shape `(batch_size)`): Indices of input sequence tokens in the vocabulary, corresponding to the tokens selected by the other beam groups in the current generation step. beam_group_idx (`int`): The index of the beam group currently being processed. Return: `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ # hamming diversity: penalise using same token in current group which was used in previous groups at # the same time step batch_size = current_tokens.shape[0] // self._num_beams group_start_idx = beam_group_idx * self._num_sub_beams group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams) group_size = group_end_idx - group_start_idx vocab_size = scores.shape[-1] if group_start_idx == 0: return scores for batch_idx in range(batch_size): # predicted tokens of last time step of previous groups previous_group_tokens = current_tokens[ batch_idx * self._num_beams : batch_idx * self._num_beams + group_start_idx ] token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device) scores[batch_idx * group_size : (batch_idx + 1) * group_size] -= self._diversity_penalty * token_frequency return scores class ForcedBOSTokenLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] that enforces the specified token as the first generated token. Args: bos_token_id (`int`): The id of the token to force as the first generated token. 
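    Examples (an illustrative sketch: multilingual translation models such as mBART-50 use the forced BOS token to
    pick the target language; the checkpoint and language codes below are one possible choice, not a requirement):

    ```python
    >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", src_lang="en_XX")
    >>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
    >>> inputs = tokenizer("Hello, how are you?", return_tensors="pt")

    >>> # `forced_bos_token_id` adds this processor to the list of logits processors: the French language code is
    >>> # forced as the first generated token, steering the model towards a French translation.
    >>> output_ids = model.generate(
    ...     **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fr_XX"], max_new_tokens=30
    ... )
    ```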
""" def __init__(self, bos_token_id: int): self.bos_token_id = bos_token_id @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: cur_len = input_ids.shape[-1] if cur_len == 1: num_tokens = scores.shape[1] scores[:, [i for i in range(num_tokens) if i != self.bos_token_id]] = -float("inf") scores[:, self.bos_token_id] = 0 return scores class ForcedEOSTokenLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached. Args: max_length (`int`): The maximum length of the sequence to be generated. eos_token_id (`Union[int, List[int]]`): The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a list to set multiple *end-of-sequence* tokens. """ def __init__(self, max_length: int, eos_token_id: Union[int, List[int]]): self.max_length = max_length if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] self.eos_token_id = eos_token_id @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: cur_len = input_ids.shape[-1] if cur_len == self.max_length - 1: num_tokens = scores.shape[1] scores[:, [i for i in range(num_tokens) if i not in self.eos_token_id]] = -float("inf") for i in self.eos_token_id: scores[:, i] = 0 return scores class InfNanRemoveLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] that removes all `nan` and `inf` values to avoid the generation method to fail. Note that using the logits processor should only be used if necessary since it can slow down the generation method. """ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: # set all nan values to 0.0 scores[scores != scores] = 0.0 # set all inf values to max possible value scores[scores == float("inf")] = torch.finfo(scores.dtype).max return scores class ExponentialDecayLengthPenalty(LogitsProcessor): r""" [`LogitsProcessor`] that exponentially increases the score of the eos_token_id after regulation_start has been reached. Args: exponential_decay_length_penalty (`tuple(int, float)`): This tuple shall consist of: `(start_index, decay_factor)` where `start_index` indicates where penalty starts and `decay_factor` represents the factor of exponential decay eos_token_id (`Union[int, List[int]]`): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. input_ids_seq_length (`int`): The length of the input sequence. 
""" def __init__( self, exponential_decay_length_penalty: Tuple[int, float], eos_token_id: Union[int, List[int]], input_ids_seq_length: int, ): self.regulation_start = exponential_decay_length_penalty[0] + input_ids_seq_length self.regulation_factor = exponential_decay_length_penalty[1] if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] self.eos_token_id = eos_token_id @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: cur_len = input_ids.shape[-1] if cur_len > self.regulation_start: for i in self.eos_token_id: scores[:, i] = scores[:, i] * pow(self.regulation_factor, cur_len - self.regulation_start) return scores class LogitNormalization(LogitsProcessor, LogitsWarper): r""" [`LogitsWarper`] and [`LogitsProcessor`] for normalizing the scores using log-softmax. It's important to normalize the scores during beam search, after applying the logits processors or warpers, since the search algorithm used in this library doesn't do it (it only does it before, but they may need re-normalization) but it still supposes that the scores are normalized when comparing the hypotheses. """ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: scores = scores.log_softmax(dim=-1) return scores class SuppressTokensAtBeginLogitsProcessor(LogitsProcessor): r""" [`SuppressTokensAtBeginLogitsProcessor`] supresses a list of tokens as soon as the `generate` function starts generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` at not sampled at the begining of the generation. """ def __init__(self, begin_suppress_tokens, begin_index): self.begin_suppress_tokens = list(begin_suppress_tokens) self.begin_index = begin_index @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: if input_ids.shape[1] == self.begin_index: scores[:, self.begin_suppress_tokens] = -float("inf") return scores class SuppressTokensLogitsProcessor(LogitsProcessor): r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they are not sampled.""" def __init__(self, suppress_tokens): self.suppress_tokens = list(suppress_tokens) @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: scores[:, self.suppress_tokens] = -float("inf") return scores class ForceTokensLogitsProcessor(LogitsProcessor): r"""This processor takes a list of pairs of integers which indicates a mapping from generation indices to token indices that will be forced before sampling. The processor will set their log probs to `inf` so that they are sampled at their corresponding index.""" def __init__(self, force_token_map: List[List[int]]): self.force_token_map = dict(force_token_map) @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: generation_idx = input_ids.shape[-1] current_token = self.force_token_map.get(generation_idx, None) if current_token is not None: scores[:, :] = -float("inf") scores[:, current_token] = 0 return scores class WhisperTimeStampLogitsProcessor(LogitsProcessor): r""" Whisper specific Processor. 
This processor can be used to force a list of tokens. The processor will set their log probs to `inf` so that they are sampled at their corresponding index. Args: generate_config (`GenerateConfig`): The generate config used to generate the output. The following parameters are required: eos_token_id (`int`, *optional*, defaults to 50257): The id of the *end-of-sequence* token. no_timestamps_token_id (`int`, *optional*, defaults to 50363): The id of the `"<|notimestamps|>"` token. max_initial_timestamp_index (`int`, *optional*, defaults to 1): Used to set the maximum value of the initial timestamp. This is used to prevent the model from predicting timestamps that are too far in the future. """ def __init__(self, generate_config): # support for the kwargs self.eos_token_id = generate_config.eos_token_id self.no_timestamps_token_id = generate_config.no_timestamps_token_id self.timestamp_begin = generate_config.no_timestamps_token_id + 1 self.begin_index = len(generate_config.forced_decoder_ids) + 2 if generate_config.forced_decoder_ids[-1][1] == self.no_timestamps_token_id: self.begin_index -= 1 self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: # suppress <|notimestamps|> which is handled by without_timestamps scores[:, self.no_timestamps_token_id] = -float("inf") if input_ids.shape[1] == self.begin_index - 1: scores[:, :] = -float("inf") scores[:, self.timestamp_begin] = 0 return scores # timestamps have to appear in pairs, except directly before eos_token; mask logits accordingly for k in range(input_ids.shape[0]): seq = list(input_ids[k, self.begin_index :].tolist()) last_was_timestamp = len(seq) >= 1 and seq[-1] >= self.timestamp_begin penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= self.timestamp_begin if last_was_timestamp: if penultimate_was_timestamp: # has to be non-timestamp scores[k, self.timestamp_begin :] = -float("inf") else: # cannot be normal text tokens scores[k, : self.eos_token_id] = -float("inf") # apply the `max_initial_timestamp` option if input_ids.shape[1] == self.begin_index and self.max_initial_timestamp_index is not None: last_allowed = self.timestamp_begin + self.max_initial_timestamp_index scores[:, last_allowed + 1 :] = -float("inf") # if sum of probability over timestamps is above any other token, sample timestamp logprobs = torch.nn.functional.log_softmax(scores.float(), dim=-1) for k in range(input_ids.shape[0]): timestamp_logprob = logprobs[k, self.timestamp_begin :].logsumexp(dim=-1) max_text_token_logprob = logprobs[k, : self.timestamp_begin].max() if timestamp_logprob > max_text_token_logprob: scores[k, : self.timestamp_begin] = -float("inf") return scores class ClassifierFreeGuidanceLogitsProcessor(LogitsProcessor): r"""Logits processor for classifier free guidance (CFG). The scores are split over the batch dimension, where the first half correspond to the conditional logits (predicted from the input prompt) and the second half correspond to the unconditional logits (predicted from an empty or 'null' prompt). The processor computes a weighted average across the conditional and unconditional logits, parameterised by the `guidance_scale`. Args: guidance_scale (float): The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`. 
Higher guidance scale encourages the model to generate samples that are more closely linked to the input prompt, usually at the expense of poorer quality. """ def __init__(self, guidance_scale): if guidance_scale > 1: self.guidance_scale = guidance_scale else: raise ValueError( "Require guidance scale >1 to use the classifier free guidance processor, got guidance scale " f"{guidance_scale}." ) @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: # simple check to make sure we have compatible batch sizes between our # logits scores (cond + uncond) and input ids (cond only) if scores.shape[0] != 2 * input_ids.shape[0]: raise ValueError( f"Logits should have twice the batch size of the input ids, the first half of batches corresponding to " f"the conditional inputs, and the second half of batches corresponding to the unconditional inputs. Got " f"batch size {scores.shape[0]} for the logits and {input_ids.shape[0]} for the input ids." ) unguided_bsz = scores.shape[0] // 2 cond_logits, uncond_logits = scores.split(unguided_bsz, dim=0) scores = uncond_logits + (cond_logits - uncond_logits) * self.guidance_scale return scores class AlternatingCodebooksLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] enforcing alternated generation between the two codebooks of [`Bark`]'s fine submodel. Args: input_start_len (`int`): The length of the initial input sequence. semantic_vocab_size (`int`): Vocabulary size of the semantic part, i.e number of tokens associated to the semantic vocabulary. codebook_size (`int`): Number of tokens associated to the codebook. """ def __init__(self, input_start_len: int, semantic_vocab_size: int, codebook_size: int): if not isinstance(input_start_len, int) or input_start_len < 0: raise ValueError(f"`input_starting_length` has to be a non-negative integer, but is {input_start_len}") self.input_start_len = input_start_len self.semantic_vocab_size = semantic_vocab_size self.codebook_size = codebook_size def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: curr_len = input_ids.shape[-1] # even -> first codebook, odd -> second codebook is_first_codebook = ((curr_len - self.input_start_len) % 2) == 0 if is_first_codebook: scores[:, : self.semantic_vocab_size] = -float("inf") scores[:, self.semantic_vocab_size + self.codebook_size :] = -float("inf") else: scores[:, : self.semantic_vocab_size + self.codebook_size] = -float("inf") return scores
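A minimal sketch of how the classifier-free guidance processor above combines the two halves of the score batch.
All shapes, seeds and the guidance scale are arbitrary illustrations; in practice the processor is added to the
generation loop by `generate` when a `guidance_scale` larger than 1 is requested by a model that supports CFG.

```python
import torch

from transformers.generation.logits_process import ClassifierFreeGuidanceLogitsProcessor

torch.manual_seed(0)
batch_size, vocab_size = 2, 8

# `input_ids` covers only the conditional half of the batch; `scores` stacks the conditional logits first and
# the unconditional ("null prompt") logits second, hence twice the batch size.
input_ids = torch.randint(0, vocab_size, (batch_size, 5))
scores = torch.randn(2 * batch_size, vocab_size)

cfg = ClassifierFreeGuidanceLogitsProcessor(guidance_scale=1.5)
guided = cfg(input_ids, scores)  # uncond + 1.5 * (cond - uncond)
print(guided.shape)  # torch.Size([2, 8])
```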
56,335
46.460826
131
py
transformers
transformers-main/src/transformers/generation/tf_logits_process.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import List, Tuple import numpy as np import tensorflow as tf from ..tf_utils import stable_softmax from ..utils import add_start_docstrings from ..utils.logging import get_logger logger = get_logger(__name__) TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`tf.Tensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search. cur_len (`int`): The current length of valid input sequence tokens. In the TF implementation, the input_ids' sequence length is the maximum length generate can produce, and we need to know which of its tokens are valid. kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `tf.Tensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class TFLogitsProcessor: """Abstract base class for all logit processors that can be applied during generation.""" @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: """TF method for processing logits.""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class TFLogitsWarper: """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: """TF method for warping logits.""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class TFLogitsProcessorList(list): """ This class can be used to create a list of [`TFLogitsProcessor`] to subsequently process a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each [`TFLogitsProcessor`] to the inputs. """ @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int, **kwargs) -> tf.Tensor: for processor in self: function_args = inspect.signature(processor.__call__).parameters if len(function_args) > 3: if not all(arg in kwargs for arg in list(function_args.keys())[2:]): raise ValueError( f"Make sure that all the required parameters: {list(function_args.keys())} for " f"{processor.__class__} are passed to the logits processor." 
) scores = processor(input_ids, scores, cur_len, **kwargs) else: scores = processor(input_ids, scores, cur_len) return scores class TFTemperatureLogitsWarper(TFLogitsWarper): r""" [`TFLogitsWarper`] for temperature (exponential scaling output probability distribution). Args: temperature (`float`): The value used to module the logits distribution. """ def __init__(self, temperature: float): if not isinstance(temperature, float) or not (temperature > 0): raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}") self.temperature = temperature def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: scores = scores / self.temperature return scores class TFTopKLogitsWarper(TFLogitsWarper): r""" [`TFLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. Args: top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. """ def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_k, int) or top_k <= 0: raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") self.top_k = max(top_k, min_tokens_to_keep) self.filter_value = filter_value def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: top_k = min(self.top_k, scores.shape[-1]) # Safety check # Boolean mask containing all tokens with a probability less than the last token of the top-k indices_to_remove = scores < tf.math.top_k(scores, k=top_k)[0][..., -1:] next_scores = tf.where(indices_to_remove, self.filter_value, scores) return next_scores class TFTopPLogitsWarper(TFLogitsWarper): """ [`TFLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off. Args: top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. 
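    Examples (a small self-contained sketch with dummy logits; the values, the 4-token vocabulary and `top_p=0.9`
    are arbitrary illustrations):

    ```python
    >>> import tensorflow as tf

    >>> from transformers.generation.tf_logits_process import TFTopPLogitsWarper

    >>> scores = tf.constant([[0.0, 1.0, 2.0, 3.0]])  # dummy logits for a batch of 1 and a 4-token vocabulary
    >>> warper = TFTopPLogitsWarper(top_p=0.9)

    >>> # `input_ids` and `cur_len` belong to the shared processor signature but are not used by this warper
    >>> warped = warper(input_ids=tf.constant([[0]]), scores=scores, cur_len=1)
    >>> # tokens outside the smallest set whose cumulative probability reaches `top_p` now have score `-inf`
    ```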
""" def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0): raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1): raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") self.top_p = top_p self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: topk_scores, topk_indices = tf.math.top_k(scores, scores.shape[-1]) mask_scores = tf.fill(scores.shape, self.filter_value) cumulative_probs = tf.math.cumsum(stable_softmax(topk_scores, axis=-1), axis=-1) score_mask = cumulative_probs < self.top_p # Also include the token that is higher than top_p (the first false = shift and insert a True on the left) score_mask = tf.concat((tf.ones([score_mask.shape[0], 1], dtype=tf.bool), score_mask[:, :-1]), axis=-1) # Ensure min tokens to keep score_mask = tf.concat( ( tf.ones([score_mask.shape[0], self.min_tokens_to_keep], dtype=tf.bool), score_mask[:, self.min_tokens_to_keep :], ), axis=-1, ) # Mask the values that do not fit the criteria topk_next_scores = tf.where(score_mask, topk_scores, mask_scores) # Undo the topk sorting: converts the 2D matrix of per-row original indices of shape (batch_size, vocab_size) # to a 3D tensor of shape (batch_size, vocab_size, 2) containing the original score coordinate, from which we # can scatter (i.e. `scatter_indices[row, col, :]` is a tensor containing `[row, topk_indices[row, col]]`) scatter_rows = tf.tile(tf.expand_dims(tf.range(topk_indices.shape[0]), axis=-1), [1, topk_indices.shape[-1]]) scatter_indices = tf.stack((scatter_rows, topk_indices), axis=-1) next_scores = tf.scatter_nd(scatter_indices, topk_next_scores, shape=topk_next_scores.shape) return next_scores class TFMinLengthLogitsProcessor(TFLogitsProcessor): r""" [`TFLogitsProcessor`] enforcing a min-length by setting EOS probability to 0. Args: min_length (`int`): The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`. eos_token_id (`int`): The id of the *end-of-sequence* token. """ def __init__(self, min_length: int, eos_token_id: int): if not isinstance(min_length, int) or min_length < 0: raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}") if not isinstance(eos_token_id, int) or eos_token_id < 0: raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}") self.min_length = min_length self.eos_token_id = eos_token_id def _apply_eos_token_mask(self, scores: tf.Tensor) -> tf.Tensor: eos_token_id_mask = tf.range(scores.shape[-1]) == self.eos_token_id scores = tf.where(eos_token_id_mask, float("-inf"), scores) return scores def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: # applies eos token masking if the first argument is true scores = tf.cond( tf.less(cur_len, self.min_length), lambda: self._apply_eos_token_mask(scores), lambda: tf.identity(scores), ) return scores class TFRepetitionPenaltyLogitsProcessor(TFLogitsProcessor): r""" [`TFLogitsProcessor`] enforcing an exponential penalty on repeated sequences. Args: repetition_penalty (`float`): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. 
""" def __init__(self, penalty: float): if not isinstance(penalty, float) or not (penalty > 0): raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}") self.penalty = penalty def _create_score_penalties(self, input_ids: tf.Tensor, logits: tf.Tensor) -> tf.Tensor: # We want to populate the penalties in the positions of `input_ids`. Since XLA can't handle shapes unknown # before runtime, `tf.unique` can't be used. Therefore, we may have redundant updates, when a given row has # the same token multiple times. # Gathers the penalties to apply logit_penalties = tf.gather(logits, input_ids, axis=1, batch_dims=1) logit_penalties = tf.where(logit_penalties > 0, 1 / self.penalty, logit_penalties) logit_penalties = tf.where(logit_penalties < 0, self.penalty, logit_penalties) # Scatters the penalties token_penalties = tf.ones(logits.shape) batch_size = input_ids.shape[0] seq_len = tf.shape(input_ids)[1] # the sequence length has dynamic size, hence the dynamic shape indexable_prev_input_ids = tf.concat( ( tf.expand_dims(tf.repeat(tf.range(batch_size), seq_len), axis=-1), tf.expand_dims(tf.reshape(input_ids, [-1]), axis=-1), ), axis=1, ) token_penalties = tf.tensor_scatter_nd_update( token_penalties, indices=indexable_prev_input_ids, updates=tf.reshape(logit_penalties, [-1]) ) return token_penalties def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: score_penalties = self._create_score_penalties(input_ids[:, :cur_len], scores) scores = tf.math.multiply(scores, score_penalties) return scores class TFNoBadWordsLogitsProcessor(TFLogitsProcessor): """ [`TFLogitsProcessor`] that enforces that specified sequences will never be sampled. Args: bad_words_ids (`List[List[int]]`): List of list of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, make sure to set `add_prefix_space=True` when initializing the tokenizer, and use `tokenizer(bad_words, add_special_tokens=False).input_ids`. The `add_prefix_space` argument is only supported for some slow tokenizers, as fast tokenizers' prefixing behaviours come from `pre tokenizers`. Read more [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers). eos_token_id (`int`): The id of the *end-of-sequence* token. """ def __init__(self, bad_words_ids: List[List[int]], eos_token_id: int): if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0: raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.") if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids): raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.") if any( any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids) for bad_word_ids in bad_words_ids ): raise ValueError( f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}." ) # stores the information about bad words in three tensors: # 1. a rectangular tensor with the forbidden sequences (padded with `-1`), for full data comparisons self.bad_word_seqs_ids = tf.ragged.constant(bad_words_ids).to_tensor(default_value=-1) # 2. 
a tensor with the unpadded length of each forbidden sequence, for quick length comparisons bad_word_seqs_len = [len(bad_words) for bad_words in bad_words_ids] if any(word_len == 0 for word_len in bad_word_seqs_len): raise ValueError(f"Banned words token sequences {bad_words_ids} cannot have an empty list") self.bad_word_seqs_len = tf.convert_to_tensor(bad_word_seqs_len, dtype=tf.int32) # 3. a tensor containing the last token for each sequence, for easy access to the tokens that may be banned self.seq_forbidden_tokens = tf.convert_to_tensor([bad_words[-1] for bad_words in bad_words_ids]) def _calc_row_banned_bad_tokens(self, row_input_ids: tf.Tensor) -> tf.Tensor: def _tokens_match(bad_word_seq_number): def _len_one(): # If the bad sequence only has one token, always mask it return tf.cond( tf.math.equal(self.bad_word_seqs_len[bad_word_seq_number], 1), lambda: tf.ones((), dtype=tf.bool), _len_greater_than_cur_len, ) def _len_greater_than_cur_len(): # Otherwise, if the bad sequence is longer than the current length they can't ever match return tf.cond( tf.math.greater(self.bad_word_seqs_len[bad_word_seq_number], tf.shape(row_input_ids)[0]), lambda: tf.zeros((), dtype=tf.bool), _match_found, ) def _match_found(): # Finaly, runs the actual comparison. Can only be called if the previous comparisons do not yield # an answer (otherwise we get indexing exceptions) compare_len = self.bad_word_seqs_len[bad_word_seq_number] - 1 return tf.cond( tf.math.reduce_all( tf.math.equal( row_input_ids[-compare_len:], self.bad_word_seqs_ids[bad_word_seq_number, :compare_len] ) ), lambda: tf.ones((), dtype=tf.bool), lambda: tf.zeros((), dtype=tf.bool), ) match = _len_one() return match # Compares the current row against all bad word sequences, obtaining a mask with the matches. match_mask = tf.map_fn(_tokens_match, tf.range(self.bad_word_seqs_ids.shape[0]), fn_output_signature=tf.bool) row_banned_tokens = self.seq_forbidden_tokens[match_mask] return row_banned_tokens def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: # We want to mask some banned tokens, at a score level. Since the banned tokens depend on the previous # `input_ids`, they may have a different length for each row, and they may even be empty for some rows. # To remain simple and XLA-compatible, we work on a per-row fashion. # TODO (Joao): this function might trigger XLA retracing as `cur_len` increases. Fix it if it becomes # a frequent choke point. (make `cur_len` a tensor?) def _get_row_updated_score(row_inputs: Tuple[tf.Tensor]) -> tf.Tensor: row_input_ids, row_score = row_inputs banned_tokens = self._calc_row_banned_bad_tokens(row_input_ids[:cur_len]) banned_tokens_mask = tf.scatter_nd( indices=tf.expand_dims(banned_tokens, axis=-1), updates=tf.ones_like(banned_tokens, dtype=tf.bool), shape=row_score.shape, ) row_score = tf.where(banned_tokens_mask, -float("inf"), row_score) return row_score scores = tf.map_fn(_get_row_updated_score, (input_ids, scores), fn_output_signature=tf.float32) return scores class TFNoRepeatNGramLogitsProcessor(TFLogitsProcessor): r""" [`TFLogitsProcessor`] that enforces no repetition of n-grams. See [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345). Args: ngram_size (`int`): All ngrams of size `ngram_size` can only occur once. 
""" def __init__(self, ngram_size: int): if not isinstance(ngram_size, int) or ngram_size <= 0: raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}") self.ngram_size = ngram_size def calc_banned_ngram_tokens(self, input_ids, num_hypos, cur_len): # Copied from fairseq for no_repeat_ngram in beam_search if cur_len + 1 < self.ngram_size: # return no banned tokens if we haven't generated ngram_size tokens yet return [[] for _ in range(num_hypos)] generated_ngrams = [{} for _ in range(num_hypos)] prev_input_ids = input_ids[:, :cur_len] for idx in range(num_hypos): gen_tokens = prev_input_ids[idx].numpy().tolist() generated_ngram = generated_ngrams[idx] for ngram in zip(*[gen_tokens[i:] for i in range(self.ngram_size)]): prev_ngram_tuple = tuple(ngram[:-1]) generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] def _get_generated_ngrams(hypo_idx): # Before decoding the next token, prevent decoding of ngrams that have already appeared start_idx = cur_len + 1 - self.ngram_size ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].numpy().tolist()) return generated_ngrams[hypo_idx].get(ngram_idx, []) banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)] return banned_tokens def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: # TODO (joao): enable XLA on this logits processor. See discussion and attempts in # https://github.com/huggingface/transformers/pull/16974 if not tf.executing_eagerly(): raise NotImplementedError("TFNoRepeatNGramLogitsProcessor is only implemented for eager execution.") batch_size, vocab_size = scores.shape banned_tokens = self.calc_banned_ngram_tokens(input_ids, batch_size, cur_len) # create banned_tokens boolean mask banned_tokens_indices_mask = [] for banned_tokens_slice in banned_tokens: banned_tokens_indices_mask.append( [True if token in banned_tokens_slice else False for token in range(vocab_size)] ) scores = tf.where(tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf"), scores) return scores class TFForcedBOSTokenLogitsProcessor(TFLogitsProcessor): r""" [`TFLogitsProcessor`] that enforces the specified token as the first generated token. Args: bos_token_id (`int`): The id of the token to force as the first generated token. """ def __init__(self, bos_token_id: int): if bos_token_id < 0: raise ValueError(f"The forced bos token id must be a non-negative integer, got {bos_token_id}") self.bos_token_id = bos_token_id def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: if cur_len == 1: batch_size, num_tokens = scores.shape # sets the score to 0 in the bos_token_id column scores = tf.zeros((batch_size, 1)) # sets the score to -inf everywhere else if self.bos_token_id > 0: scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.bos_token_id)), scores), axis=-1) if self.bos_token_id < (num_tokens - 1): scores = tf.concat( (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.bos_token_id))), axis=-1, ) return scores class TFForcedEOSTokenLogitsProcessor(TFLogitsProcessor): r""" [`TFLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached. Args: max_length (`int`): The maximum length of the sequence to be generated. eos_token_id (`int`): The id of the token to force as the last generated token when `max_length` is reached. 
""" def __init__(self, max_length: int, eos_token_id: int): self.max_length = max_length if eos_token_id < 0: raise ValueError(f"The forced eos token id must be a non-negative integer, got {eos_token_id}") self.eos_token_id = eos_token_id def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: if cur_len == self.max_length - 1: batch_size, num_tokens = scores.shape # sets the score to 0 in the eos_token_id column scores = tf.zeros((batch_size, 1)) # sets the score to -inf everywhere else if self.eos_token_id > 0: scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.eos_token_id)), scores), axis=-1) if self.eos_token_id < (num_tokens - 1): scores = tf.concat( (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.eos_token_id))), axis=-1, ) return scores class TFSuppressTokensAtBeginLogitsProcessor(TFLogitsProcessor): r""" [`TFSuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` at not sampled at the begining of the generation. """ def __init__(self, begin_suppress_tokens, begin_index): self.begin_suppress_tokens = list(begin_suppress_tokens) self.begin_index = begin_index def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: scores = tf.cond( tf.equal(cur_len, self.begin_index), lambda: tf.tensor_scatter_nd_update( scores, indices=[[i, token] for i in range(scores.shape[0]) for token in self.begin_suppress_tokens], updates=[-float("inf") for _ in range(scores.shape[0] * len(self.begin_suppress_tokens))], ), lambda: scores, ) return scores class TFSuppressTokensLogitsProcessor(TFLogitsProcessor): r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they are not sampled.""" def __init__(self, suppress_tokens): self.suppress_tokens = list(suppress_tokens) def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: scores = tf.tensor_scatter_nd_update( scores, indices=[[i, token] for i in range(scores.shape[0]) for token in self.suppress_tokens], updates=[-float("inf") for _ in range(scores.shape[0] * len(self.suppress_tokens))], ) return scores class TFForceTokensLogitsProcessor(TFLogitsProcessor): r"""This processor takes a list of pairs of integers which indicates a mapping from generation indices to token indices that will be forced before sampling. The processor will set their log probs to `0` and all other tokens to `-inf` so that they are sampled at their corresponding index.""" def __init__(self, force_token_map: List[List[int]]): force_token_map = dict(force_token_map) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have an negative value. 
force_token_array = np.ones((max(force_token_map.keys()) + 1), dtype=np.int32) * -1 for index, token in force_token_map.items(): if token is not None: force_token_array[index] = token self.force_token_array = tf.convert_to_tensor(force_token_array, dtype=tf.int32) def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: def _force_token(generation_idx): batch_size = scores.shape[0] current_token = self.force_token_array[generation_idx] new_scores = tf.ones_like(scores, dtype=scores.dtype) * -float("inf") indices = tf.stack((tf.range(batch_size), tf.tile([current_token], [batch_size])), axis=1) updates = tf.zeros((batch_size,), dtype=scores.dtype) new_scores = tf.tensor_scatter_nd_update(new_scores, indices, updates) return new_scores scores = tf.cond( tf.greater_equal(cur_len, tf.shape(self.force_token_array)[0]), # If the current length is geq than the length of force_token_array, the processor does nothing. lambda: tf.identity(scores), # Otherwise, it may force a certain token. lambda: tf.cond( tf.greater_equal(self.force_token_array[cur_len], 0), # Only valid (positive) tokens are forced lambda: _force_token(cur_len), # Otherwise, the processor does nothing. lambda: scores, ), ) return scores
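A minimal sketch of composing the TF processors above outside of `generate`, on dummy tensors. The batch size,
vocabulary size, token ids and thresholds are arbitrary illustrations; `TFLogitsProcessorList` simply calls each
processor in order, forwarding the extra `cur_len` argument.

```python
import tensorflow as tf

from transformers.generation.tf_logits_process import (
    TFLogitsProcessorList,
    TFMinLengthLogitsProcessor,
    TFNoBadWordsLogitsProcessor,
)

# Dummy state: batch of 1, 3 valid tokens so far (padded to the maximum generate length), vocabulary of 5 tokens.
input_ids = tf.constant([[2, 3, 4, 0, 0]])
scores = tf.random.normal((1, 5))
cur_len = 3

processors = TFLogitsProcessorList(
    [
        TFMinLengthLogitsProcessor(min_length=5, eos_token_id=1),  # EOS (id 1) is masked while cur_len < 5
        TFNoBadWordsLogitsProcessor(bad_words_ids=[[4]], eos_token_id=1),  # token 4 can never be generated
    ]
)
scores = processors(input_ids, scores, cur_len)  # both banned positions are now -inf
```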
28,136
46.528716
131
py
transformers
transformers-main/src/transformers/generation/beam_constraints.py
from abc import ABC, abstractmethod from typing import List, Optional class Constraint(ABC): r"""Abstract base class for all constraints that can be applied during generation. It must define how the constraint can be satisfied. All classes that inherit Constraint must follow the requirement that ```py completed = False while not completed: _, completed = constraint.update(constraint.advance()) ``` will always terminate (halt). """ def __init__(self): # test for the above condition self.test() def test(self): """ Tests whether this constraint has been properly defined. """ counter = 0 completed = False while not completed: if counter == 1: self.reset() advance = self.advance() if not self.does_advance(advance): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." ) stepped, completed, reset = self.update(advance) counter += 1 if counter > 10000: raise Exception("update() does not fulfill the constraint.") if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly.") @abstractmethod def advance(self): """ When called, returns the token that would take this constraint one step closer to being fulfilled. Return: token_ids(`torch.tensor`): Must be a tensor of a list of indexable tokens, not some integer. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def does_advance(self, token_id: int): """ Reads in a token and returns whether it creates progress. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def update(self, token_id: int): """ Reads in a token and returns booleans that indicate the progress made by it. This function will update the state of this object unlikes `does_advance(self, token_id: int)`. This isn't to test whether a certain token will advance the progress; it's to update its state as if it has been generated. This becomes important if token_id != desired token (refer to else statement in PhrasalConstraint) Args: token_id(`int`): The id of a newly generated token in the beam search. Return: stepped(`bool`): Whether this constraint has become one step closer to being fulfuilled. completed(`bool`): Whether this constraint has been completely fulfilled by this token being generated. reset (`bool`): Whether this constraint has reset its progress by this token being generated. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def reset(self): """ Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of a constraint is abrupted by an unwanted token. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def remaining(self): """ Returns the number of remaining steps of `advance()` in order to complete this constraint. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def copy(self, stateful=False): """ Creates a new instance of this constraint. Args: stateful(`bool`): Whether to not only copy the constraint for new instance, but also its state. Return: constraint(`Constraint`): The same constraint as the one being called from. """ raise NotImplementedError( f"{self.__class__} is an abstract class. 
Only classes inheriting this class can be called." ) class PhrasalConstraint(Constraint): r""" [`Constraint`] enforcing that an ordered sequence of tokens is included in the output. Args: token_ids (`List[int]`): The id of the token that must be generated by the output. """ def __init__(self, token_ids: List[int]): super(Constraint, self).__init__() if not isinstance(token_ids, list) or len(token_ids) == 0: raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.") if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids): raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.") self.token_ids = token_ids self.seqlen = len(self.token_ids) self.fulfilled_idx = -1 # the index of the currently fulfilled step self.completed = False def advance(self): if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def does_advance(self, token_id: int): if not isinstance(token_id, int): raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def update(self, token_id: int): if not isinstance(token_id, int): raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") stepped = False completed = False reset = False if self.does_advance(token_id): self.fulfilled_idx += 1 stepped = True if self.fulfilled_idx == (self.seqlen - 1): completed = True self.completed = completed else: # failed to make progress. reset = True self.reset() return stepped, completed, reset def reset(self): self.completed = False self.fulfilled_idx = 0 def remaining(self): return self.seqlen - (self.fulfilled_idx + 1) def copy(self, stateful=False): new_constraint = PhrasalConstraint(self.token_ids) if stateful: new_constraint.seq_len = self.seqlen new_constraint.fulfilled_idx = self.fulfilled_idx new_constraint.completed = self.completed return new_constraint class DisjunctiveTrie: def __init__(self, nested_token_ids: List[List[int]], no_subsets=True): r""" A helper class that builds a trie with the words represented in `nested_token_ids`. """ self.max_height = max([len(one) for one in nested_token_ids]) root = {} for token_ids in nested_token_ids: level = root for tidx, token_id in enumerate(token_ids): if token_id not in level: level[token_id] = {} level = level[token_id] if no_subsets and self.has_subsets(root, nested_token_ids): raise ValueError( "Each list in `nested_token_ids` can't be a complete subset of another list, but is" f" {nested_token_ids}." ) self.trie = root def next_tokens(self, current_seq): """ The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`. """ start = self.trie for current_token in current_seq: start = start[current_token] next_tokens = list(start.keys()) return next_tokens def reached_leaf(self, current_seq): next_tokens = self.next_tokens(current_seq) return len(next_tokens) == 0 def count_leaves(self, root): next_nodes = list(root.values()) if len(next_nodes) == 0: return 1 else: return sum([self.count_leaves(nn) for nn in next_nodes]) def has_subsets(self, trie, nested_token_ids): """ Returns whether # of leaves == # of words. Otherwise some word is a subset of another. 
""" leaf_count = self.count_leaves(trie) return len(nested_token_ids) != leaf_count class DisjunctiveConstraint(Constraint): r""" A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints. Args: nested_token_ids (`List[List[int]]`): a list of words, where each word is a list of ids. This constraint is fulfilled by generating just one from the list of words. """ def __init__(self, nested_token_ids: List[List[int]]): super(Constraint, self).__init__() if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0: raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.") if any(not isinstance(token_ids, list) for token_ids in nested_token_ids): raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.") if any( any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids) for token_ids in nested_token_ids ): raise ValueError( f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." ) self.trie = DisjunctiveTrie(nested_token_ids) self.token_ids = nested_token_ids self.seqlen = self.trie.max_height self.current_seq = [] self.completed = False def advance(self): token_list = self.trie.next_tokens(self.current_seq) if len(token_list) == 0: return None else: return token_list def does_advance(self, token_id: int): if not isinstance(token_id, int): raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") next_tokens = self.trie.next_tokens(self.current_seq) return token_id in next_tokens def update(self, token_id: int): if not isinstance(token_id, int): raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") stepped = False completed = False reset = False if self.does_advance(token_id): self.current_seq.append(token_id) stepped = True else: reset = True self.reset() completed = self.trie.reached_leaf(self.current_seq) self.completed = completed return stepped, completed, reset def reset(self): self.completed = False self.current_seq = [] def remaining(self): if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq) def copy(self, stateful=False): new_constraint = DisjunctiveConstraint(self.token_ids) if stateful: new_constraint.seq_len = self.seqlen new_constraint.current_seq = self.current_seq new_constraint.completed = self.completed return new_constraint class ConstraintListState: r""" A class for beam scorers to track its progress through a list of constraints. Args: constraints (`List[Constraint]`): A list of [`Constraint`] objects that must be fulfilled by the beam scorer. """ def __init__(self, constraints: List[Constraint]): self.constraints = constraints # max # of steps required to fulfill a given constraint self.max_seqlen = max([c.seqlen for c in constraints]) self.n_constraints = len(constraints) self.completed = False self.init_state() def init_state(self): self.complete_constraints = [] self.inprogress_constraint = None self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints] def get_bank(self): add = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints) * self.max_seqlen) + add def advance(self): """The list of tokens to generate such that we can make progress. 
By "list" we don't mean the list of token that will fully fulfill a constraint. Given constraints `c_i = {t_ij | j == # of tokens}`, If we're not in the middle of progressing through a specific constraint `c_i`, we return: `[t_k1 for k in indices of unfulfilled constraints]` If we are in the middle of a constraint, then we return: `[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint. Though we don't care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint, that's the only one we'll return. """ token_list = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" advance = constraint.advance() if isinstance(advance, int): token_list.append(advance) elif isinstance(advance, list): token_list.extend(advance) else: advance = self.inprogress_constraint.advance() if isinstance(advance, int): token_list.append(advance) elif isinstance(advance, list): token_list.extend(advance) if len(token_list) == 0: return None else: return token_list def reset(self, token_ids: Optional[List[int]]): """ token_ids: the tokens generated thus far to reset the state of the progress through constraints. """ self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint complete, stepped = self.add(token) # the entire list of constraints are fulfilled if self.completed: break def add(self, token_id: int): if not isinstance(token_id, int): raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.") complete, stepped = False, False if self.completed: complete = True stepped = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state stepped, complete, reset = self.inprogress_constraint.update(token_id) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False)) self.inprogress_constraint = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint) self.inprogress_constraint = None if len(self.pending_constraints) == 0: # we're done! self.completed = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints): if pending_constraint.does_advance(token_id): stepped, complete, reset = pending_constraint.update(token_id) if not stepped: raise Exception( "`constraint.update(token_id)` is not yielding incremental progress, " "even though `constraint.does_advance(token_id)` is true." ) if complete: self.complete_constraints.append(pending_constraint) self.inprogress_constraint = None if not complete and stepped: self.inprogress_constraint = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". 
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.

                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch the self.constraints objects
        # throughout this process, so the copy starts from the initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
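As a rough illustration of how the classes above are meant to be driven (the real driver is the constrained beam search code elsewhere in the library), the sketch below steps a `ConstraintListState` by hand. The token ids and the greedy choice of the first candidate are made-up values for the example, not part of this module.

# Illustrative sketch only: drive PhrasalConstraint / DisjunctiveConstraint through a
# ConstraintListState by hand. The token ids (5, 6, 7, 8, ...) are arbitrary example values.
phrase = PhrasalConstraint([5, 6, 7])                  # must generate 5, 6, 7 in order
word = DisjunctiveConstraint([[8, 9], [8, 10, 11]])    # must generate one of the two id sequences
state = ConstraintListState([phrase, word])

generated = []
while not state.completed:
    candidates = state.advance()   # token ids that make progress on some constraint
    token_id = candidates[0]       # a real beam scorer would pick by log-probability instead
    state.add(token_id)
    generated.append(token_id)

print(generated)         # [5, 6, 7, 8, 9] with this greedy choice
print(state.get_bank())  # 2 completed constraints * max_seqlen(3) = 6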
19,089
35.641075
121
py
transformers
transformers-main/.circleci/create_circleci_config.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import copy import glob import os import random from dataclasses import dataclass from typing import Any, Dict, List, Optional import yaml COMMON_ENV_VARIABLES = { "OMP_NUM_THREADS": 1, "TRANSFORMERS_IS_CI": True, "PYTEST_TIMEOUT": 120, "RUN_PIPELINE_TESTS": False, "RUN_PT_TF_CROSS_TESTS": False, "RUN_PT_FLAX_CROSS_TESTS": False, } COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "dist": "loadfile", "s": None} DEFAULT_DOCKER_IMAGE = [{"image": "cimg/python:3.8.12"}] class EmptyJob: job_name = "empty" def to_dict(self): return { "working_directory": "~/transformers", "docker": copy.deepcopy(DEFAULT_DOCKER_IMAGE), "steps":["checkout"], } @dataclass class CircleCIJob: name: str additional_env: Dict[str, Any] = None cache_name: str = None cache_version: str = "0.7" docker_image: List[Dict[str, str]] = None install_steps: List[str] = None marker: Optional[str] = None parallelism: Optional[int] = 1 pytest_num_workers: int = 8 pytest_options: Dict[str, Any] = None resource_class: Optional[str] = "xlarge" tests_to_run: Optional[List[str]] = None working_directory: str = "~/transformers" # This should be only used for doctest job! command_timeout: Optional[int] = None def __post_init__(self): # Deal with defaults for mutable attributes. if self.additional_env is None: self.additional_env = {} if self.cache_name is None: self.cache_name = self.name if self.docker_image is None: # Let's avoid changing the default list and make a copy. 
self.docker_image = copy.deepcopy(DEFAULT_DOCKER_IMAGE) if self.install_steps is None: self.install_steps = [] if self.pytest_options is None: self.pytest_options = {} if isinstance(self.tests_to_run, str): self.tests_to_run = [self.tests_to_run] if self.parallelism is None: self.parallelism = 1 def to_dict(self): env = COMMON_ENV_VARIABLES.copy() env.update(self.additional_env) job = { "working_directory": self.working_directory, "docker": self.docker_image, "environment": env, } if self.resource_class is not None: job["resource_class"] = self.resource_class if self.parallelism is not None: job["parallelism"] = self.parallelism steps = [ "checkout", {"attach_workspace": {"at": "~/transformers/test_preparation"}}, { "restore_cache": { "keys": [ f"v{self.cache_version}-{self.cache_name}-pip-" + '{{ checksum "setup.py" }}', f"v{self.cache_version}-{self.cache_name}-pip-", ] } }, { "restore_cache": { "keys": [ f"v{self.cache_version}-{self.cache_name}-site-packages-" + '{{ checksum "setup.py" }}', f"v{self.cache_version}-{self.cache_name}-site-packages-", ] } }, ] steps.extend([{"run": l} for l in self.install_steps]) steps.append( { "save_cache": { "key": f"v{self.cache_version}-{self.cache_name}-pip-" + '{{ checksum "setup.py" }}', "paths": ["~/.cache/pip"], } } ) steps.append( { "save_cache": { "key": f"v{self.cache_version}-{self.cache_name}-site-packages-" + '{{ checksum "setup.py" }}', "paths": ["~/.pyenv/versions/"], } } ) steps.append({"run": {"name": "Show installed libraries and their versions", "command": "pip freeze | tee installed.txt"}}) steps.append({"store_artifacts": {"path": "~/transformers/installed.txt"}}) all_options = {**COMMON_PYTEST_OPTIONS, **self.pytest_options} pytest_flags = [f"--{key}={value}" if (value is not None or key in ["doctest-modules"]) else f"-{key}" for key, value in all_options.items()] pytest_flags.append( f"--make-reports={self.name}" if "examples" in self.name else f"--make-reports=tests_{self.name}" ) test_command = "" if self.command_timeout: test_command = f"timeout {self.command_timeout} " test_command += f"python -m pytest -n {self.pytest_num_workers} " + " ".join(pytest_flags) if self.parallelism == 1: if self.tests_to_run is None: test_command += " << pipeline.parameters.tests_to_run >>" else: test_command += " " + " ".join(self.tests_to_run) else: # We need explicit list instead of `pipeline.parameters.tests_to_run` (only available at job runtime) tests = self.tests_to_run if tests is None: folder = os.environ["test_preparation_dir"] test_file = os.path.join(folder, "filtered_test_list.txt") if os.path.exists(test_file): with open(test_file) as f: tests = f.read().split(" ") # expand the test list if tests == ["tests"]: tests = [os.path.join("tests", x) for x in os.listdir("tests")] expanded_tests = [] for test in tests: if test.endswith(".py"): expanded_tests.append(test) elif test == "tests/models": expanded_tests.extend([os.path.join(test, x) for x in os.listdir(test)]) elif test == "tests/pipelines": expanded_tests.extend([os.path.join(test, x) for x in os.listdir(test)]) else: expanded_tests.append(test) # Avoid long tests always being collected together random.shuffle(expanded_tests) tests = " ".join(expanded_tests) # Each executor to run ~10 tests n_executors = max(len(tests) // 10, 1) # Avoid empty test list on some executor(s) or launching too many executors if n_executors > self.parallelism: n_executors = self.parallelism job["parallelism"] = n_executors # Need to be newline separated for the command `circleci tests split` below 
command = f'echo {tests} | tr " " "\\n" >> tests.txt' steps.append({"run": {"name": "Get tests", "command": command}}) command = 'TESTS=$(circleci tests split tests.txt) && echo $TESTS > splitted_tests.txt' steps.append({"run": {"name": "Split tests", "command": command}}) steps.append({"store_artifacts": {"path": "~/transformers/tests.txt"}}) steps.append({"store_artifacts": {"path": "~/transformers/splitted_tests.txt"}}) test_command = "" if self.timeout: test_command = f"timeout {self.timeout} " test_command += f"python -m pytest -n {self.pytest_num_workers} " + " ".join(pytest_flags) test_command += " $(cat splitted_tests.txt)" if self.marker is not None: test_command += f" -m {self.marker}" if self.name == "pr_documentation_tests": # can't use ` | tee tee tests_output.txt` as usual test_command += " > tests_output.txt" # Save the return code, so we can check if it is timeout in the next step. test_command += '; touch "$?".txt' # Never fail the test step for the doctest job. We will check the results in the next step, and fail that # step instead if the actual test failures are found. This is to avoid the timeout being reported as test # failure. test_command = f"({test_command}) || true" else: test_command += " | tee tests_output.txt" steps.append({"run": {"name": "Run tests", "command": test_command}}) # return code `124` means the previous (pytest run) step is timeout if self.name == "pr_documentation_tests": checkout_doctest_command = 'if [ -s reports/tests_pr_documentation_tests/failures_short.txt ]; ' checkout_doctest_command += 'then echo "some test failed"; ' checkout_doctest_command += 'cat reports/tests_pr_documentation_tests/failures_short.txt; ' checkout_doctest_command += 'cat reports/tests_pr_documentation_tests/summary_short.txt; exit -1; ' checkout_doctest_command += 'elif [ -s reports/tests_pr_documentation_tests/stats.txt ]; then echo "All tests pass!"; ' checkout_doctest_command += 'elif [ -f 124.txt ]; then echo "doctest timeout!"; else echo "other fatal error)"; exit -1; fi;' steps.append({"run": {"name": "Check doctest results", "command": checkout_doctest_command}}) steps.append({"store_artifacts": {"path": "~/transformers/tests_output.txt"}}) steps.append({"store_artifacts": {"path": "~/transformers/reports"}}) job["steps"] = steps return job @property def job_name(self): return self.name if "examples" in self.name else f"tests_{self.name}" # JOBS torch_and_tf_job = CircleCIJob( "torch_and_tf", additional_env={"RUN_PT_TF_CROSS_TESTS": True}, install_steps=[ "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng git-lfs cmake", "git lfs install", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]", "pip install -U --upgrade-strategy eager tensorflow_probability", "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate", ], marker="is_pt_tf_cross_test", pytest_options={"rA": None, "durations": 0}, ) torch_and_flax_job = CircleCIJob( "torch_and_flax", additional_env={"RUN_PT_FLAX_CROSS_TESTS": True}, install_steps=[ "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng", "pip install -U --upgrade-strategy eager --upgrade pip", "pip install -U --upgrade-strategy eager .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]", "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate", ], marker="is_pt_flax_cross_test", pytest_options={"rA": None, 
"durations": 0}, ) torch_job = CircleCIJob( "torch", install_steps=[ "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]", "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate", ], parallelism=1, pytest_num_workers=3, ) tf_job = CircleCIJob( "tf", install_steps=[ "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng cmake", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]", "pip install -U --upgrade-strategy eager tensorflow_probability", ], parallelism=1, pytest_num_workers=6, pytest_options={"rA": None}, ) flax_job = CircleCIJob( "flax", install_steps=[ "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[flax,testing,sentencepiece,flax-speech,vision]", ], parallelism=1, pytest_options={"rA": None}, ) pipelines_torch_job = CircleCIJob( "pipelines_torch", additional_env={"RUN_PIPELINE_TESTS": True}, install_steps=[ "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm,video]", ], pytest_options={"rA": None}, marker="is_pipeline_test", ) pipelines_tf_job = CircleCIJob( "pipelines_tf", additional_env={"RUN_PIPELINE_TESTS": True}, install_steps=[ "sudo apt-get -y update && sudo apt-get install -y cmake", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[sklearn,tf-cpu,testing,sentencepiece,vision]", "pip install -U --upgrade-strategy eager tensorflow_probability", ], pytest_options={"rA": None}, marker="is_pipeline_test", ) custom_tokenizers_job = CircleCIJob( "custom_tokenizers", additional_env={"RUN_CUSTOM_TOKENIZERS": True}, install_steps=[ "sudo apt-get -y update && sudo apt-get install -y cmake", { "name": "install jumanpp", "command": "wget https://github.com/ku-nlp/jumanpp/releases/download/v2.0.0-rc3/jumanpp-2.0.0-rc3.tar.xz\n" "tar xvf jumanpp-2.0.0-rc3.tar.xz\n" "mkdir jumanpp-2.0.0-rc3/bld\n" "cd jumanpp-2.0.0-rc3/bld\n" "sudo cmake .. 
-DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local\n" "sudo make install\n", }, "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]", "python -m unidic download", ], parallelism=None, resource_class=None, tests_to_run=[ "./tests/models/bert_japanese/test_tokenization_bert_japanese.py", "./tests/models/openai/test_tokenization_openai.py", "./tests/models/clip/test_tokenization_clip.py", ], ) examples_torch_job = CircleCIJob( "examples_torch", cache_name="torch_examples", install_steps=[ "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[sklearn,torch,sentencepiece,testing,torch-speech]", "pip install -U --upgrade-strategy eager -r examples/pytorch/_tests_requirements.txt", ], ) examples_tensorflow_job = CircleCIJob( "examples_tensorflow", cache_name="tensorflow_examples", install_steps=[ "sudo apt-get -y update && sudo apt-get install -y cmake", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[sklearn,tensorflow,sentencepiece,testing]", "pip install -U --upgrade-strategy eager -r examples/tensorflow/_tests_requirements.txt", ], ) examples_flax_job = CircleCIJob( "examples_flax", cache_name="flax_examples", install_steps=[ "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[flax,testing,sentencepiece]", "pip install -U --upgrade-strategy eager -r examples/flax/_tests_requirements.txt", ], ) hub_job = CircleCIJob( "hub", additional_env={"HUGGINGFACE_CO_STAGING": True}, install_steps=[ "sudo apt-get -y update && sudo apt-get install git-lfs", 'git config --global user.email "ci@dummy.com"', 'git config --global user.name "ci"', "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[torch,sentencepiece,testing,vision]", ], marker="is_staging_test", pytest_num_workers=1, ) onnx_job = CircleCIJob( "onnx", install_steps=[ "sudo apt-get -y update && sudo apt-get install -y cmake", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[torch,tf,testing,sentencepiece,onnxruntime,vision,rjieba]", ], pytest_options={"k onnx": None}, pytest_num_workers=1, ) exotic_models_job = CircleCIJob( "exotic_models", install_steps=[ "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[torch,testing,vision]", "pip install -U --upgrade-strategy eager torchvision", "pip install -U --upgrade-strategy eager scipy", "pip install -U --upgrade-strategy eager 'git+https://github.com/facebookresearch/detectron2.git'", "sudo apt install tesseract-ocr", "pip install -U --upgrade-strategy eager pytesseract", "pip install -U --upgrade-strategy eager natten", # TODO (ydshieh): Remove this line once `https://github.com/facebookresearch/detectron2/issues/5010` is resolved 'pip install -U --upgrade-strategy eager "Pillow<10.0.0"', ], tests_to_run=[ "tests/models/*layoutlmv*", "tests/models/*nat", "tests/models/deta", ], pytest_num_workers=1, pytest_options={"durations": 100}, ) repo_utils_job = CircleCIJob( "repo_utils", install_steps=[ "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[quality,testing,torch]", ], parallelism=None, pytest_num_workers=1, resource_class="large", 
tests_to_run="tests/repo_utils", ) # We also include a `dummy.py` file in the files to be doc-tested to prevent edge case failure. Otherwise, the pytest # hangs forever during test collection while showing `collecting 0 items / 21 errors`. (To see this, we have to remove # the bash output redirection.) py_command = 'from utils.tests_fetcher import get_doctest_files; to_test = get_doctest_files() + ["dummy.py"]; to_test = " ".join(to_test); print(to_test)' py_command = f"$(python3 -c '{py_command}')" command = f'echo "{py_command}" > pr_documentation_tests_temp.txt' doc_test_job = CircleCIJob( "pr_documentation_tests", additional_env={"TRANSFORMERS_VERBOSITY": "error", "DATASETS_VERBOSITY": "error", "SKIP_CUDA_DOCTEST": "1"}, install_steps=[ "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time ffmpeg", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager -e .[dev]", "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate", "pip install --upgrade --upgrade-strategy eager pytest pytest-sugar", "pip install -U --upgrade-strategy eager natten", "find -name __pycache__ -delete", "find . -name \*.pyc -delete", # Add an empty file to keep the test step running correctly even no file is selected to be tested. "touch dummy.py", { "name": "Get files to test", "command": command, }, { "name": "Show information in `Get files to test`", "command": "cat pr_documentation_tests_temp.txt" }, { "name": "Get the last line in `pr_documentation_tests.txt`", "command": "tail -n1 pr_documentation_tests_temp.txt | tee pr_documentation_tests.txt" }, ], tests_to_run="$(cat pr_documentation_tests.txt)", # noqa pytest_options={"-doctest-modules": None, "doctest-glob": "*.md", "dist": "loadfile", "rvsA": None}, command_timeout=1200, # test cannot run longer than 1200 seconds pytest_num_workers=1, ) REGULAR_TESTS = [ torch_and_tf_job, torch_and_flax_job, torch_job, tf_job, flax_job, custom_tokenizers_job, hub_job, onnx_job, exotic_models_job, ] EXAMPLES_TESTS = [ examples_torch_job, examples_tensorflow_job, examples_flax_job, ] PIPELINE_TESTS = [ pipelines_torch_job, pipelines_tf_job, ] REPO_UTIL_TESTS = [repo_utils_job] DOC_TESTS = [doc_test_job] def create_circleci_config(folder=None): if folder is None: folder = os.getcwd() # Used in CircleCIJob.to_dict() to expand the test list (for using parallelism) os.environ["test_preparation_dir"] = folder jobs = [] all_test_file = os.path.join(folder, "test_list.txt") if os.path.exists(all_test_file): with open(all_test_file) as f: all_test_list = f.read() else: all_test_list = [] if len(all_test_list) > 0: jobs.extend(PIPELINE_TESTS) test_file = os.path.join(folder, "filtered_test_list.txt") if os.path.exists(test_file): with open(test_file) as f: test_list = f.read() else: test_list = [] if len(test_list) > 0: jobs.extend(REGULAR_TESTS) extended_tests_to_run = set(test_list.split()) # Extend the test files for cross test jobs for job in jobs: if job.job_name in ["tests_torch_and_tf", "tests_torch_and_flax"]: for test_path in copy.copy(extended_tests_to_run): dir_path, fn = os.path.split(test_path) if fn.startswith("test_modeling_tf_"): fn = fn.replace("test_modeling_tf_", "test_modeling_") elif fn.startswith("test_modeling_flax_"): fn = fn.replace("test_modeling_flax_", "test_modeling_") else: if job.job_name == "test_torch_and_tf": fn = fn.replace("test_modeling_", "test_modeling_tf_") elif job.job_name == "test_torch_and_flax": fn = fn.replace("test_modeling_", 
"test_modeling_flax_") new_test_file = str(os.path.join(dir_path, fn)) if os.path.isfile(new_test_file): if new_test_file not in extended_tests_to_run: extended_tests_to_run.add(new_test_file) extended_tests_to_run = sorted(extended_tests_to_run) for job in jobs: if job.job_name in ["tests_torch_and_tf", "tests_torch_and_flax"]: job.tests_to_run = extended_tests_to_run fn = "filtered_test_list_cross_tests.txt" f_path = os.path.join(folder, fn) with open(f_path, "w") as fp: fp.write(" ".join(extended_tests_to_run)) example_file = os.path.join(folder, "examples_test_list.txt") if os.path.exists(example_file) and os.path.getsize(example_file) > 0: with open(example_file, "r", encoding="utf-8") as f: example_tests = f.read() for job in EXAMPLES_TESTS: framework = job.name.replace("examples_", "").replace("torch", "pytorch") if example_tests == "all": job.tests_to_run = [f"examples/{framework}"] else: job.tests_to_run = [f for f in example_tests.split(" ") if f.startswith(f"examples/{framework}")] if len(job.tests_to_run) > 0: jobs.append(job) doctest_file = os.path.join(folder, "doctest_list.txt") if os.path.exists(doctest_file): with open(doctest_file) as f: doctest_list = f.read() else: doctest_list = [] if len(doctest_list) > 0: jobs.extend(DOC_TESTS) repo_util_file = os.path.join(folder, "test_repo_utils.txt") if os.path.exists(repo_util_file) and os.path.getsize(repo_util_file) > 0: jobs.extend(REPO_UTIL_TESTS) if len(jobs) == 0: jobs = [EmptyJob()] config = {"version": "2.1"} config["parameters"] = { # Only used to accept the parameters from the trigger "nightly": {"type": "boolean", "default": False}, "tests_to_run": {"type": "string", "default": test_list}, } config["jobs"] = {j.job_name: j.to_dict() for j in jobs} config["workflows"] = {"version": 2, "run_tests": {"jobs": [j.job_name for j in jobs]}} with open(os.path.join(folder, "generated_config.yml"), "w") as f: f.write(yaml.dump(config, indent=2, width=1000000, sort_keys=False)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--fetcher_folder", type=str, default=None, help="Only test that all tests and modules are accounted for." ) args = parser.parse_args() create_circleci_config(args.fetcher_folder)
25,350
39.239683
155
py
transformers
transformers-main/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/tokenization_{{cookiecutter.lowercase_modelname}}.py
# coding=utf-8 # Copyright 2022 {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for {{cookiecutter.modelname}}.""" {%- if cookiecutter.tokenizer_type == "Based on BERT" %} from ...utils import logging from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 512, } PRETRAINED_INIT_CONFIGURATION = { "{{cookiecutter.checkpoint_identifier}}": {"do_lower_case": False}, } class {{cookiecutter.camelcase_modelname}}Tokenizer(BertTokenizer): r""" Construct a {{cookiecutter.modelname}} tokenizer. [`~{{cookiecutter.camelcase_modelname}}Tokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION {%- elif cookiecutter.tokenizer_type == "Based on BART" %} from ...utils import logging from ..bart.tokenization_bart import BartTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.json", }, "merges_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 1024, } class {{cookiecutter.camelcase_modelname}}Tokenizer(BartTokenizer): """ Construct a {{cookiecutter.modelname}} tokenizer. [`~{{cookiecutter.camelcase_modelname}}Tokenizer`] is identical to [`BartTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass [`BartTokenizer`] for usage examples and documentation concerning parameters. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES {%- elif cookiecutter.tokenizer_type == "Standalone" %} from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 1024, } class {{cookiecutter.camelcase_modelname}}Tokenizer(PreTrainedTokenizer): """ Construct a {{cookiecutter.modelname}} tokenizer. Based on byte-level Byte-Pair-Encoding. Args: vocab_file (`str`): Path to the vocabulary file. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", **kwargs ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs) """ Initialisation """ @property def vocab_size(self): """ Returns vocab size """ def get_vocab(self): """ Returns vocab as a dict """ def _tokenize(self, text): """ Returns a tokenized string. """ def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ def save_vocabulary(self, save_directory): """ Save the vocabulary and special tokens file to a directory. Args: save_directory (`str`): The directory in which to save the vocabulary. Returns: `Tuple(str)`: Paths to the files saved. """ def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A {{cookiecutter.modelname}} sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. {{cookiecutter.modelname}} does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) class {{cookiecutter.camelcase_modelname}}TokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" {{cookiecutter.modelname}} tokenizer (backed by HuggingFace's *tokenizers* library). Args: vocab_file (`str`): Path to the vocabulary file. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, merges_file, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs ): super().__init__( ByteLevelBPETokenizer( vocab_file=vocab_file, merges_file=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, ) self.add_prefix_space = add_prefix_space def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. {{cookiecutter.modelname}} does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] {% endif %}
12,042
35.165165
138
py
transformers
transformers-main/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/tokenization_fast_{{cookiecutter.lowercase_modelname}}.py
# coding=utf-8 # Copyright 2022 {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for {{cookiecutter.modelname}}.""" {%- if cookiecutter.tokenizer_type == "Based on BERT" %} from ...utils import logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Tokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 512, } PRETRAINED_INIT_CONFIGURATION = { "{{cookiecutter.checkpoint_identifier}}": {"do_lower_case": False}, } class {{cookiecutter.camelcase_modelname}}TokenizerFast(BertTokenizerFast): r""" Construct a "fast" {{cookiecutter.modelname}} tokenizer (backed by HuggingFace's *tokenizers* library). [`~{{cookiecutter.camelcase_modelname}}TokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION slow_tokenizer_class = {{cookiecutter.camelcase_modelname}}Tokenizer {%- elif cookiecutter.tokenizer_type == "Based on BART" %} from ...utils import logging from ..bart.tokenization_bart_fast import BartTokenizerFast from .tokenization_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Tokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.json", }, "merges_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/merges.txt", }, "tokenizer_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 1024, } class {{cookiecutter.camelcase_modelname}}TokenizerFast(BartTokenizerFast): r""" Construct a "fast" {{cookiecutter.modelname}} tokenizer (backed by HuggingFace's *tokenizers* library). [`~{{cookiecutter.camelcase_modelname}}TokenizerFast`] is identical to [`BartTokenizerFast`] and runs end-to-end tokenization: punctuation splitting and wordpiece. 
Refer to superclass [`BartTokenizerFast`] for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = {{cookiecutter.camelcase_modelname}}Tokenizer {%- elif cookiecutter.tokenizer_type == "Standalone" %} from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Tokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.txt", }, "tokenizer_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 1024, } class {{cookiecutter.camelcase_modelname}}TokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" {{cookiecutter.modelname}} tokenizer (backed by HuggingFace's *tokenizers* library). Args: vocab_file (`str`): Path to the vocabulary file. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = {{cookiecutter.camelcase_modelname}}Tokenizer def __init__( self, vocab_file, merges_file, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs ): super().__init__( ByteLevelBPETokenizer( vocab_file=vocab_file, merges_file=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, ) self.add_prefix_space = add_prefix_space def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. {{cookiecutter.modelname}} does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] {% endif %}
7,417
35.722772
142
py
transformers
transformers-main/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/to_replace_{{cookiecutter.lowercase_modelname}}.py
## Copyright 2022 The HuggingFace Team. All rights reserved. ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. ## This file is made so that specific statements may be copied inside existing files. This is useful to copy ## import statements in __init__.py, or to complete model lists in the AUTO files. ## ## It is to be used as such: ## Put '# To replace in: "FILE_PATH"' in order to indicate the contents will be copied in the file at path FILE_PATH ## Put '# Below: "STATEMENT"' in order to copy the contents below **the first occurence** of that line in the file at FILE_PATH ## Put '# Replace with:' followed by the lines containing the content to define the content ## End a statement with '# End.'. If starting a new statement without redefining the FILE_PATH, it will continue pasting ## content in that file. ## ## Put '## COMMENT' to comment on the file. # To replace in: "src/transformers/__init__.py" # Below: " # PyTorch models structure" if generating PyTorch # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" %} _import_structure["models.{{cookiecutter.lowercase_modelname}}"].extend( [ "{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST", "{{cookiecutter.camelcase_modelname}}ForMaskedLM", "{{cookiecutter.camelcase_modelname}}ForCausalLM", "{{cookiecutter.camelcase_modelname}}ForMultipleChoice", "{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "{{cookiecutter.camelcase_modelname}}ForTokenClassification", "{{cookiecutter.camelcase_modelname}}Layer", "{{cookiecutter.camelcase_modelname}}Model", "{{cookiecutter.camelcase_modelname}}PreTrainedModel", "load_tf_weights_in_{{cookiecutter.lowercase_modelname}}", ] ) {% else %} _import_structure["models.{{cookiecutter.lowercase_modelname}}"].extend( [ "{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST", "{{cookiecutter.camelcase_modelname}}ForCausalLM", "{{cookiecutter.camelcase_modelname}}ForConditionalGeneration", "{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "{{cookiecutter.camelcase_modelname}}Model", "{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] ) {% endif -%} # End. 
# Below: " # TensorFlow models structure" if generating TensorFlow # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" %} _import_structure["models.{{cookiecutter.lowercase_modelname}}"].extend( [ "TF_{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST", "TF{{cookiecutter.camelcase_modelname}}ForMaskedLM", "TF{{cookiecutter.camelcase_modelname}}ForCausalLM", "TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice", "TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "TF{{cookiecutter.camelcase_modelname}}ForTokenClassification", "TF{{cookiecutter.camelcase_modelname}}Layer", "TF{{cookiecutter.camelcase_modelname}}Model", "TF{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] ) {% else %} _import_structure["models.{{cookiecutter.lowercase_modelname}}"].extend( [ "TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration", "TF{{cookiecutter.camelcase_modelname}}Model", "TF{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] ) {% endif -%} # End. # Below: " # Flax models structure" if generating Flax # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" %} _import_structure["models.{{cookiecutter.lowercase_modelname}}"].extend( [ "Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM", "Flax{{cookiecutter.camelcase_modelname}}ForCausalLM", "Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice", "Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification", "Flax{{cookiecutter.camelcase_modelname}}Layer", "Flax{{cookiecutter.camelcase_modelname}}Model", "Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] ) {% else %} _import_structure["models.{{cookiecutter.lowercase_modelname}}"].extend( [ "Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration", "Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "Flax{{cookiecutter.camelcase_modelname}}Model", "Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] ) {% endif -%} # End. # Below: " # Fast tokenizers structure" # Replace with: _import_structure["models.{{cookiecutter.lowercase_modelname}}"].append("{{cookiecutter.camelcase_modelname}}TokenizerFast") # End. # Below: " # Models" # Replace with: "models.{{cookiecutter.lowercase_modelname}}": ["{{cookiecutter.uppercase_modelname}}_PRETRAINED_CONFIG_ARCHIVE_MAP", "{{cookiecutter.camelcase_modelname}}Config", "{{cookiecutter.camelcase_modelname}}Tokenizer"], # End. 
# To replace in: "src/transformers/__init__.py" # Below: " # PyTorch model imports" if generating PyTorch # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" %} from .models.{{cookiecutter.lowercase_modelname}} import ( {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST, {{cookiecutter.camelcase_modelname}}ForMaskedLM, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}ForMultipleChoice, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}ForTokenClassification, {{cookiecutter.camelcase_modelname}}Layer, {{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}PreTrainedModel, load_tf_weights_in_{{cookiecutter.lowercase_modelname}}, ) {% else %} from .models.{{cookiecutter.lowercase_modelname}} import ( {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% endif -%} # End. # Below: " # TensorFlow model imports" if generating TensorFlow # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" %} from .models.{{cookiecutter.lowercase_modelname}} import ( TF_{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST, TF{{cookiecutter.camelcase_modelname}}ForMaskedLM, TF{{cookiecutter.camelcase_modelname}}ForCausalLM, TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice, TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification, TF{{cookiecutter.camelcase_modelname}}ForTokenClassification, TF{{cookiecutter.camelcase_modelname}}Layer, TF{{cookiecutter.camelcase_modelname}}Model, TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% else %} from .models.{{cookiecutter.lowercase_modelname}} import ( TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, TF{{cookiecutter.camelcase_modelname}}Model, TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% endif -%} # End. # Below: " # Flax model imports" if generating Flax # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" %} from .models.{{cookiecutter.lowercase_modelname}} import ( Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM, Flax{{cookiecutter.camelcase_modelname}}ForCausalLM, Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice, Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification, Flax{{cookiecutter.camelcase_modelname}}Layer, Flax{{cookiecutter.camelcase_modelname}}Model, Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% else %} from .models.{{cookiecutter.lowercase_modelname}} import ( Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, Flax{{cookiecutter.camelcase_modelname}}Model, Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% endif -%} # End. 
# Below: " # Fast tokenizers imports" # Replace with: from .models.{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}TokenizerFast # End. # Below: " from .models.albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig" # Replace with: from .models.{{cookiecutter.lowercase_modelname}} import {{cookiecutter.uppercase_modelname}}_PRETRAINED_CONFIG_ARCHIVE_MAP, {{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}Tokenizer # End. # To replace in: "src/transformers/models/__init__.py" # Below: "from . import (" # Replace with: {{cookiecutter.lowercase_modelname}}, # End. # To replace in: "src/transformers/models/auto/configuration_auto.py" # Below: "# Add configs here" # Replace with: ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}Config"), # End. # Below: "# Add archive maps here" # Replace with: ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.uppercase_modelname}}_PRETRAINED_CONFIG_ARCHIVE_MAP"), # End. # Below: "# Add full (and cased) model names here" # Replace with: ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}"), # End. # To replace in: "src/transformers/models/auto/modeling_auto.py" if generating PyTorch # Below: "# Base model mapping" # Replace with: ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}Model"), # End. # Below: "# Model with LM heads mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}ForMaskedLM"), {% else %} ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}ForConditionalGeneration"), {% endif -%} # End. # Below: "# Model for Causal LM mapping" # Replace with: ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}ForCausalLM"), # End. # Below: "# Model for Masked LM mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}ForMaskedLM"), {% else -%} {% endif -%} # End. # Below: "# Model for Sequence Classification mapping" # Replace with: ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}ForSequenceClassification"), # End. # Below: "# Model for Question Answering mapping" # Replace with: ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}ForQuestionAnswering"), # End. # Below: "# Model for Token Classification mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}ForTokenClassification"), {% else -%} {% endif -%} # End. # Below: "# Model for Multiple Choice mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}ForMultipleChoice"), {% else -%} {% endif -%} # End. # Below: "# Model for Seq2Seq Causal LM mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} {% else %} ("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}ForConditionalGeneration"), {% endif -%} # End. # To replace in: "src/transformers/models/auto/modeling_tf_auto.py" if generating TensorFlow # Below: "# Base model mapping" # Replace with: ("{{cookiecutter.lowercase_modelname}}", "TF{{cookiecutter.camelcase_modelname}}Model"), # End. 
# Below: "# Model with LM heads mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "TF{{cookiecutter.camelcase_modelname}}ForMaskedLM"), {% else %} ("{{cookiecutter.lowercase_modelname}}", "TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration"), {% endif -%} # End. # Below: "# Model for Causal LM mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "TF{{cookiecutter.camelcase_modelname}}ForCausalLM"), {% else -%} {% endif -%} # End. # Below: "# Model for Masked LM mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "TF{{cookiecutter.camelcase_modelname}}ForMaskedLM"), {% else -%} {% endif -%} # End. # Below: "# Model for Sequence Classification mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification"), {% else -%} {% endif -%} # End. # Below: "# Model for Question Answering mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering"), {% else -%} {% endif -%} # End. # Below: "# Model for Token Classification mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "TF{{cookiecutter.camelcase_modelname}}ForTokenClassification"), {% else -%} {% endif -%} # End. # Below: "# Model for Multiple Choice mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice"), {% else -%} {% endif -%} # End. # Below: "# Model for Seq2Seq Causal LM mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} {% else %} ("{{cookiecutter.lowercase_modelname}}", "TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration"), {% endif -%} # End. # To replace in: "src/transformers/models/auto/modeling_flax_auto.py" if generating Flax # Below: "# Base model mapping" # Replace with: ("{{cookiecutter.lowercase_modelname}}", "Flax{{cookiecutter.camelcase_modelname}}Model"), # End. # Below: "# Model for Masked LM mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM"), {% else %} ("{{cookiecutter.lowercase_modelname}}", "Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration"), {% endif -%} # End. # Below: "# Model for Causal LM mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "Flax{{cookiecutter.camelcase_modelname}}ForCausalLM"), {% else -%} {% endif -%} # End. # Below: "# Model for Masked LM mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM"), {% else -%} {% endif -%} # End. 
# Below: "# Model for Sequence Classification mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification"), {% else %} ("{{cookiecutter.lowercase_modelname}}", "Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification"), {% endif -%} # End. # Below: "# Model for Question Answering mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering"), {% else %} ("{{cookiecutter.lowercase_modelname}}", "Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering"), {% endif -%} # End. # Below: "# Model for Token Classification mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification"), {% else -%} {% endif -%} # End. # Below: "# Model for Multiple Choice mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} ("{{cookiecutter.lowercase_modelname}}", "Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice"), {% else -%} {% endif -%} # End. # Below: "# Model for Seq2Seq Causal LM mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} {% else %} ("{{cookiecutter.lowercase_modelname}}", "Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration"), {% endif -%} # End. # To replace in: "utils/check_repo.py" if generating PyTorch # Below: "models to ignore for model xxx mapping" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} {% else -%} "{{cookiecutter.camelcase_modelname}}Encoder", "{{cookiecutter.camelcase_modelname}}Decoder", "{{cookiecutter.camelcase_modelname}}DecoderWrapper", {% endif -%} # End. # Below: "models to ignore for not tested" # Replace with: {% if cookiecutter.is_encoder_decoder_model == "False" -%} {% else -%} "{{cookiecutter.camelcase_modelname}}Encoder", # Building part of bigger (tested) model. "{{cookiecutter.camelcase_modelname}}Decoder", # Building part of bigger (tested) model. "{{cookiecutter.camelcase_modelname}}DecoderWrapper", # Building part of bigger (tested) model. {% endif -%} # End.
19719
40.691332
218
py
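The to_replace block in the row above is essentially a how-to: each quoted "# Below:" line names an anchor inside an existing source file, and the snippet between "# Replace with:" and "# End." is dropped in under that anchor when the template is applied. As a minimal illustration only, assuming a hypothetical model whose cookiecutter answers render as lowercase "brandnewbert", camelcase "BrandNewBert" and uppercase "BRAND_NEW_BERT", the three anchors in src/transformers/models/auto/configuration_auto.py would receive entries like the following (hypothetical names, not part of the template itself):

# Add configs here
("brandnewbert", "BrandNewBertConfig"),
# Add archive maps here
("brandnewbert", "BRAND_NEW_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
# Add full (and cased) model names here
("brandnewbert", "BrandNewBert"),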
transformers
transformers-main/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_flax_{{cookiecutter.lowercase_modelname}}.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. {% if cookiecutter.is_encoder_decoder_model == "False" %} import unittest from transformers import is_flax_available, {{cookiecutter.camelcase_modelname}}Config from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import numpy as np from transformers import ( Flax{{cookiecutter.camelcase_modelname}}ForCausalLM, Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM, Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice, Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification, Flax{{cookiecutter.camelcase_modelname}}Model, ) class Flax{{cookiecutter.camelcase_modelname}}ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 5 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = {{cookiecutter.camelcase_modelname}}Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = Flax{{cookiecutter.camelcase_modelname}}Model(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} inputs = [input_ids, input_mask] result = model(*inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_lm_head( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = Flax{{cookiecutter.camelcase_modelname}}ForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(**inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(**inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(**inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice(config=config) multiple_choice_inputs_ids = np.tile(np.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = np.tile(np.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = np.tile(np.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(**inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(**inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, 
input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(**inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_flax class Flax{{cookiecutter.camelcase_modelname}}ModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = ( ( Flax{{cookiecutter.camelcase_modelname}}Model, Flax{{cookiecutter.camelcase_modelname}}ForCausalLM, Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM, Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification, Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice, ) if is_flax_available() else () ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = Flax{{cookiecutter.camelcase_modelname}}ModelTester(self) self.config_tester = ConfigTester(self, config_class={{cookiecutter.camelcase_modelname}}Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = Flax{{cookiecutter.camelcase_modelname}}Model.from_pretrained("{{cookiecutter.checkpoint_identifier}}") self.assertIsNotNone(model) def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b arent both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if _assert_tensors_equal(a, b, atol=atol): return True raise except Exception: if len(prefix) > 0: prefix = f"{prefix}: " raise AssertionError(f"{prefix}{a} != {b}") @require_flax class 
Flax{{cookiecutter.camelcase_modelname}}ModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM.from_pretrained("{{cookiecutter.checkpoint_identifier}}") input_ids = np.array([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 32000 expected_shape = [1, 6, vocab_size] self.assertEqual(output.shape, expected_shape) print(output[:, :3, :3]) # TODO Replace values below with what was printed above. expected_slice = np.array( [ [ [-0.05243197, -0.04498899, 0.05512108], [-0.07444685, -0.01064632, 0.04352357], [-0.05020351, 0.05530146, 0.00700043], ] ] ) _assert_tensors_equal(output[:, :3, :3], expected_slice, atol=1e-4) {% else %} import unittest from transformers import ( is_flax_available, {{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}Tokenizer, ) from transformers.testing_utils import require_sentencepiece, require_flax, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import numpy as np import jax.numpy as jnp from transformers import ( Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, Flax{{cookiecutter.camelcase_modelname}}Model, ) @require_flax class Flax{{cookiecutter.camelcase_modelname}}ModelTester: config_cls = {{cookiecutter.camelcase_modelname}}Config config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size) eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1) input_ids = np.concatenate([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, 
decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4") decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ): if attention_mask is None: attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8) if decoder_attention_mask is None: decoder_attention_mask = np.concatenate([np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8), 
np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8)], axis=-1) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class Flax{{cookiecutter.camelcase_modelname}}ModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = ( ( Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, Flax{{cookiecutter.camelcase_modelname}}Model, ) if is_flax_available() else () ) all_generative_model_classes = (Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration,) if is_flax_available() else () is_encoder_decoder = True test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = Flax{{cookiecutter.camelcase_modelname}}ModelTester(self) self.config_tester = ConfigTester(self, config_class={{cookiecutter.camelcase_modelname}}Config) def test_config(self): self.config_tester.run_common_tests() def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b arent both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if _assert_tensors_equal(a, b, atol=atol): return True raise except Exception: if len(prefix) > 0: prefix = f"{prefix}: " raise AssertionError(f"{prefix}{a} != {b}") def _long_tensor(tok_lst): return np.array(tok_lst, dtype=np.int32) TOLERANCE = 1e-4 @slow @require_sentencepiece @require_tokenizers @require_flax class Flax{{cookiecutter.camelcase_modelname}}ModelIntegrationTest(unittest.TestCase): def test_inference_no_head(self): model = Flax{{cookiecutter.camelcase_modelname}}Model.from_pretrained('{{cookiecutter.checkpoint_identifier}}') # change to intended input here input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(model.config, input_ids, decoder_input_ids) output = model(**inputs_dict)[0] expected_shape = (1, 11, 1024) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = np.array( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], ) _assert_tensors_equal(output[:, :3, :3], expected_slice, atol=TOLERANCE) def test_inference_with_head(self): model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') # change to intended input here input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(model.config, input_ids, decoder_input_ids) output = model(**inputs_dict)[0] 
expected_shape = (1, 11, 1024) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = np.array( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], ) _assert_tensors_equal(output[:, :3, :3], expected_slice, atol=TOLERANCE) def test_seq_to_seq_generation(self): hf = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') tok = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') batch_input = [ # string 1, # string 2, # string 3, # string 4, ] # The below article tests that we don't add any hypotheses outside of the top n_beams dct = tok.batch_encode_plus( batch_input, max_length=512, padding="max_length", truncation_strategy="only_first", truncation=True, return_tensors="np", ) hypotheses_batch = hf.generate( input_ids=dct["input_ids"], attention_mask=dct["attention_mask"], num_beams=2, ) EXPECTED = [ # here expected 1, # here expected 2, # here expected 3, # here expected 4, ] generated = tok.batch_decode( hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True ) assert generated == EXPECTED {%- endif %}
26929
39.19403
191
py
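One thing worth flagging in the Flax test template above: the module-level _assert_tensors_equal helper calls itself inside its own try block, so for any non-None inputs it recurses until Python's recursion limit is hit and every comparison ends in the except branch. A minimal corrected sketch, assuming a plain numpy comparison is acceptable (the template does not prescribe a particular backend for this helper):

import numpy as np

def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
    """Raise a readable AssertionError when two arrays are not close."""
    if a is None and b is None:
        return True
    # Compare element-wise up to `atol` instead of recursing into this helper.
    if np.allclose(np.asarray(a), np.asarray(b), atol=atol):
        return True
    msg = f"{prefix}: " if prefix else ""
    raise AssertionError(f"{msg}{a} != {b}")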
transformers
transformers-main/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. {% if cookiecutter.is_encoder_decoder_model == "False" %} import unittest from transformers import is_tf_available, {{cookiecutter.camelcase_modelname}}Config from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import ( TF{{cookiecutter.camelcase_modelname}}ForCausalLM, TF{{cookiecutter.camelcase_modelname}}ForMaskedLM, TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice, TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification, TF{{cookiecutter.camelcase_modelname}}ForTokenClassification, TF{{cookiecutter.camelcase_modelname}}Model, ) class TF{{cookiecutter.camelcase_modelname}}ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 5 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = {{cookiecutter.camelcase_modelname}}Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TF{{cookiecutter.camelcase_modelname}}Model(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_causal_lm_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TF{{cookiecutter.camelcase_modelname}}Model(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TF{{cookiecutter.camelcase_modelname}}Model(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) # Also check the case where encoder outputs are not passed result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_causal_lm_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TF{{cookiecutter.camelcase_modelname}}ForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_causal_lm_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = 
TF{{cookiecutter.camelcase_modelname}}ForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) prediction_scores = result["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_causal_lm_model_past( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TF{{cookiecutter.camelcase_modelname}}ForCausalLM(config=config) # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and attn_mask next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) output_from_no_past = model(next_input_ids, output_hidden_states=True).hidden_states[0] output_from_past = model( next_tokens, past_key_values=past_key_values, output_hidden_states=True ).hidden_states[0] # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_causal_lm_model_past_with_attn_mask( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TF{{cookiecutter.camelcase_modelname}}ForCausalLM(config=config) # create attention mask half_seq_length = self.seq_length // 2 attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32) attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32) attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1) # first forward pass outputs = model(input_ids, attention_mask=attn_mask, use_cache=True) # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) past_key_values = outputs.past_key_values # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1 random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size) vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change) condition = tf.transpose( tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size)) ) input_ids = tf.where(condition, random_other_next_tokens, input_ids) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) attn_mask = tf.concat( [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)], axis=1, ) output_from_no_past = model( next_input_ids, attention_mask=attn_mask, output_hidden_states=True, 
).hidden_states[0] output_from_past = model( next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_hidden_states=True ).hidden_states[0] # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_causal_lm_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TF{{cookiecutter.camelcase_modelname}}ForCausalLM(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=input_mask, use_cache=True) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ).hidden_states[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TF{{cookiecutter.camelcase_modelname}}ForCausalLM(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] encoder_hidden_states = encoder_hidden_states[:1, :, :] encoder_attention_mask = encoder_attention_mask[:1, :] self.batch_size = 1 # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, 
output_hidden_states=True, ).hidden_states[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TF{{cookiecutter.camelcase_modelname}}ForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TF{{cookiecutter.camelcase_modelname}}ForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict 
@require_tf class TF{{cookiecutter.camelcase_modelname}}ModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = ( ( TF{{cookiecutter.camelcase_modelname}}Model, TF{{cookiecutter.camelcase_modelname}}ForCausalLM, TF{{cookiecutter.camelcase_modelname}}ForMaskedLM, TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification, TF{{cookiecutter.camelcase_modelname}}ForTokenClassification, TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice, ) if is_tf_available() else () ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TF{{cookiecutter.camelcase_modelname}}ModelTester(self) self.config_tester = ConfigTester(self, config_class={{cookiecutter.camelcase_modelname}}Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): """Test the base model""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Template classes interact badly with this test.") def test_keras_fit(self): pass def test_causal_lm_base_model(self): """Test the base model of the causal LM model is_deocder=True, no cross_attention, no encoder outputs """ config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_base_model(*config_and_inputs) def test_model_as_decoder(self): """Test the base model as a decoder (of an encoder-decoder architecture) is_deocder=True + cross_attention + pass encoder outputs """ config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm(self): """Test the causal LM model""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model(*config_and_inputs) def test_causal_lm_model_as_decoder(self): """Test the causal LM model as a decoder""" config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_causal_lm_model_as_decoder(*config_and_inputs) def test_causal_lm_model_past(self): """Test causal LM model with `past_key_values`""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past(*config_and_inputs) def test_causal_lm_model_past_with_attn_mask(self): """Test the causal LM model with `past_key_values` and `attention_mask`""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*config_and_inputs) def test_causal_lm_model_past_with_large_inputs(self): """Test the causal LM model with `past_key_values` and a longer decoder sequence length""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): """Similar to `test_causal_lm_model_past_with_large_inputs` but with cross-attention""" config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TF{{cookiecutter.camelcase_modelname}}Model.from_pretrained("{{cookiecutter.checkpoint_identifier}}") self.assertIsNotNone(model) @require_tf class TF{{cookiecutter.camelcase_modelname}}ModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TF{{cookiecutter.camelcase_modelname}}ForMaskedLM.from_pretrained("{{cookiecutter.checkpoint_identifier}}") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 32000 expected_shape = [1, 6, vocab_size] self.assertEqual(output.shape, expected_shape) print(output[:, :3, :3]) # TODO Replace values below with what was printed above. expected_slice = tf.constant( [ [ [-0.05243197, -0.04498899, 0.05512108], [-0.07444685, -0.01064632, 0.04352357], [-0.05020351, 0.05530146, 0.00700043], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) {% else %} import unittest from transformers import ( is_tf_available, {{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}Tokenizer, ) from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf from transformers import ( TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, TF{{cookiecutter.camelcase_modelname}}Model, ) @require_tf class TF{{cookiecutter.camelcase_modelname}}ModelTester: config_cls = {{cookiecutter.camelcase_modelname}}Config config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, 
eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TF{{cookiecutter.camelcase_modelname}}Model(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int32) if decoder_attention_mask is None: decoder_attention_mask = tf.concat([tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int32), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int32)], axis=-1) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_tf class TF{{cookiecutter.camelcase_modelname}}ModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, TF{{cookiecutter.camelcase_modelname}}Model) if is_tf_available() else () all_generative_model_classes = (TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration,) if is_tf_available() else () is_encoder_decoder = True test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TF{{cookiecutter.camelcase_modelname}}ModelTester(self) self.config_tester = ConfigTester(self, 
config_class={{cookiecutter.camelcase_modelname}}Config) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) @unittest.skip(reason="Template classes interact badly with this test.") def test_keras_fit(self): pass def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b arent both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if tf.debugging.assert_near(a, b, atol=atol): return True raise except Exception: if len(prefix) > 0: prefix = f"{prefix}: " raise AssertionError(f"{prefix}{a} != {b}") def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) TOLERANCE = 1e-4 @slow @require_sentencepiece @require_tokenizers @require_tf class TF{{cookiecutter.camelcase_modelname}}ModelIntegrationTest(unittest.TestCase): def test_inference_no_head(self): model = TF{{cookiecutter.camelcase_modelname}}Model.from_pretrained('{{cookiecutter.checkpoint_identifier}}') # change to intended input here input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(model.config, input_ids, decoder_input_ids) output = model(**inputs_dict)[0] expected_shape = (1, 11, 1024) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = tf.Tensor( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE) def test_inference_with_head(self): model = TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') # change to intended input here input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(model.config, input_ids, decoder_input_ids) output = model(**inputs_dict)[0] expected_shape = (1, 11, 1024) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = tf.Tensor( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE) def test_seq_to_seq_generation(self): hf = TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') tok = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') batch_input = [ # string 1, # string 2, # string 3, # string 4, ] # The below article tests that we don't add any hypotheses outside of the top n_beams dct = tok.batch_encode_plus( batch_input, max_length=512, padding="max_length", truncation_strategy="only_first", truncation=True, return_tensors="tf", ) hypotheses_batch = hf.generate( input_ids=dct["input_ids"], attention_mask=dct["attention_mask"], num_beams=2, ) EXPECTED = [ # here expected 1, # here expected 2, # here expected 3, # here expected 4, ] generated = tok.batch_decode( hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True ) 
        assert generated == EXPECTED
{%- endif %}
38,379
38.485597
195
py
transformers
transformers-main/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_{{cookiecutter.lowercase_modelname}}.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch {{cookiecutter.modelname}} model. """ {% if cookiecutter.is_encoder_decoder_model == "False" -%} import unittest from ...test_modeling_common import floats_tensor from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from transformers import {{cookiecutter.camelcase_modelname}}Config from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}ForMaskedLM, {{cookiecutter.camelcase_modelname}}ForMultipleChoice, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}ForTokenClassification, {{cookiecutter.camelcase_modelname}}Model, ) from transformers.models.{{cookiecutter.lowercase_modelname}}.modeling_{{cookiecutter.lowercase_modelname}} import ( {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST, ) class {{cookiecutter.camelcase_modelname}}ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if 
self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return {{cookiecutter.camelcase_modelname}}Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = {{cookiecutter.camelcase_modelname}}Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = {{cookiecutter.camelcase_modelname}}Model(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = {{cookiecutter.camelcase_modelname}}ForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = {{cookiecutter.camelcase_modelname}}ForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, 
attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = {{cookiecutter.camelcase_modelname}}ForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = {{cookiecutter.camelcase_modelname}}ForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = {{cookiecutter.camelcase_modelname}}ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = {{cookiecutter.camelcase_modelname}}ForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = {{cookiecutter.camelcase_modelname}}ForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class {{cookiecutter.camelcase_modelname}}ModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( {{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}ForMaskedLM, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}ForMultipleChoice, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}ForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = ({{cookiecutter.camelcase_modelname}}ForCausalLM,) if is_torch_available() else () def setUp(self): self.model_tester = {{cookiecutter.camelcase_modelname}}ModelTester(self) self.config_tester = ConfigTester(self, config_class={{cookiecutter.camelcase_modelname}}Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) @slow def test_model_from_pretrained(self): for model_name in {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = {{cookiecutter.camelcase_modelname}}Model.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class {{cookiecutter.camelcase_modelname}}ModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = {{cookiecutter.camelcase_modelname}}ForMaskedLM.from_pretrained("{{cookiecutter.checkpoint_identifier}}") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 32000 expected_shape = torch.Size((1, 6, vocab_size)) self.assertEqual(output.shape, expected_shape) # TODO Replace values below with what was printed above. expected_slice = torch.tensor( [[[-0.0483, 0.1188, -0.0313], [-0.0606, 0.1435, 0.0199], [-0.0235, 0.1519, 0.0175]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) {% else -%} import copy import tempfile import unittest from transformers import is_torch_available from transformers.utils import cached_property from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor if is_torch_available(): import torch from transformers import ( {{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}Tokenizer, ) from transformers.models.{{cookiecutter.lowercase_modelname}}.modeling_{{cookiecutter.lowercase_modelname}} import ( {{cookiecutter.camelcase_modelname}}Decoder, {{cookiecutter.camelcase_modelname}}Encoder, ) def prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } @require_torch class 
{{cookiecutter.camelcase_modelname}}ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = {{cookiecutter.camelcase_modelname}}Config( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = {{cookiecutter.camelcase_modelname}}Model(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # 
test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = {{cookiecutter.camelcase_modelname}}Model(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = {{cookiecutter.camelcase_modelname}}Encoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = {{cookiecutter.camelcase_modelname}}Decoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class {{cookiecutter.camelcase_modelname}}ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ( ({{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering) if is_torch_available() else () ) all_generative_model_classes = ({{cookiecutter.camelcase_modelname}}ForConditionalGeneration,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_head_masking = False test_missing_keys = False def setUp(self): self.model_tester = {{cookiecutter.camelcase_modelname}}ModelTester(self) self.config_tester = ConfigTester(self, config_class={{cookiecutter.camelcase_modelname}}Config) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) # {{cookiecutter.camelcase_modelname}}ForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in ({{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs 
= copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration(config).eval().to(torch_device) if torch_device == "cuda": model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) TOLERANCE = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class {{cookiecutter.camelcase_modelname}}ModelIntegrationTests(unittest.TestCase): @cached_property def default_tokenizer(self): return {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') def test_inference_no_head(self): model = {{cookiecutter.camelcase_modelname}}Model.from_pretrained('{{cookiecutter.checkpoint_identifier}}').to(torch_device) input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[2, 0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588]]) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 11, 1024)) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = torch.tensor( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_inference_head(self): model = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}').to(torch_device) # change to intended input input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 11, model.config.vocab_size)) self.assertEqual(output.shape, expected_shape) # change to 
expected output here expected_slice = torch.tensor( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_seq_to_seq_generation(self): hf = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}').to(torch_device) tok = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') batch_input = [ # string 1, # string 2, # string 3, # string 4, ] # The below article tests that we don't add any hypotheses outside of the top n_beams dct = tok.batch_encode_plus( batch_input, max_length=512, padding="max_length", truncation_strategy="only_first", truncation=True, return_tensors="pt", ) hypotheses_batch = hf.generate( input_ids=dct["input_ids"].to(torch_device), attention_mask=dct["attention_mask"].to(torch_device), num_beams=2, ) EXPECTED = [ # here expected 1, # here expected 2, # here expected 3, # here expected 4, ] generated = tok.batch_decode( hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True ) assert generated == EXPECTED class {{cookiecutter.camelcase_modelname}}StandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = {{cookiecutter.camelcase_modelname}}Config( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, 
decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = {{cookiecutter.camelcase_modelname}}Decoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = {{cookiecutter.camelcase_modelname}}Decoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class 
{{cookiecutter.camelcase_modelname}}StandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ({{cookiecutter.camelcase_modelname}}Decoder, {{cookiecutter.camelcase_modelname}}ForCausalLM) if is_torch_available() else () all_generative_model_classes = ({{cookiecutter.camelcase_modelname}}ForCausalLM,) if is_torch_available() else () test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = {{cookiecutter.camelcase_modelname}}StandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class={{cookiecutter.camelcase_modelname}}Config) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients return {% endif -%}
44,199
40.269841
234
py
transformers
transformers-main/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/configuration_{{cookiecutter.lowercase_modelname}}.py
# coding=utf-8 # Copyright 2022 {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ {{cookiecutter.modelname}} model configuration """ from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) {{cookiecutter.uppercase_modelname}}_PRETRAINED_CONFIG_ARCHIVE_MAP = { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/config.json", # See all {{cookiecutter.modelname}} models at https://huggingface.co/models?filter={{cookiecutter.lowercase_modelname}} } class {{cookiecutter.camelcase_modelname}}Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`~{{cookiecutter.camelcase_modelname}}Model`]. It is used to instantiate an {{cookiecutter.modelname}} model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the {{cookiecutter.modelname}} [{{cookiecutter.checkpoint_identifier}}](https://huggingface.co/{{cookiecutter.checkpoint_identifier}}) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: {% if cookiecutter.is_encoder_decoder_model == "False" -%} vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the {{cookiecutter.modelname}} model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`~{{cookiecutter.camelcase_modelname}}Model`] or [`~TF{{cookiecutter.camelcase_modelname}}Model`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). 
type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`~{{cookiecutter.camelcase_modelname}}Model`] or [`~TF{{cookiecutter.camelcase_modelname}}Model`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. {% else -%} vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the {{cookiecutter.modelname}} model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`~{{cookiecutter.camelcase_modelname}}Model`] or [`~TF{{cookiecutter.camelcase_modelname}}Model`]. d_model (`int`, *optional*, defaults to 1024): Dimension of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimension of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
{% endif -%} Example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}Config >>> # Initializing a {{cookiecutter.modelname}} {{cookiecutter.checkpoint_identifier}} style configuration >>> configuration = {{cookiecutter.camelcase_modelname}}Config() >>> # Initializing a model from the {{cookiecutter.checkpoint_identifier}} style configuration >>> model = {{cookiecutter.camelcase_modelname}}Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "{{cookiecutter.lowercase_modelname}}" {% if cookiecutter.is_encoder_decoder_model == "False" -%} {% else -%} keys_to_ignore_at_inference = ["past_key_values"] {% endif -%} {% if cookiecutter.is_encoder_decoder_model == "False" %} {%- else %} attribute_map = { "num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model" } {%- endif %} def __init__( self, {% if cookiecutter.is_encoder_decoder_model == "False" -%} vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, {% else -%} vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, classifier_dropout=0.0, scale_embedding=False, {% endif -%} pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings {% if cookiecutter.is_encoder_decoder_model == "False" -%} self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache {% else -%} self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True {% endif -%} super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, {% if cookiecutter.is_encoder_decoder_model == "False" -%} {% else -%} is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, {% endif -%} **kwargs )
12,026
48.904564
152
py
transformers
transformers-main/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py
# coding=utf-8 # Copyright 2022 {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 {{cookiecutter.modelname}} model. """ {% if cookiecutter.is_encoder_decoder_model == "False" %} import math from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...utils import ( DUMMY_INPUTS, MULTIPLE_CHOICE_DUMMY_INPUTS, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, ) from ...modeling_tf_outputs import ( TFBaseModelOutputWithPastAndCrossAttentions, TFCausalLMOutputWithCrossAttentions, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFTokenClassificationLoss, get_initializer, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}" _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config" TF_{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [ "{{cookiecutter.checkpoint_identifier}}", # See all {{cookiecutter.modelname}} models at https://huggingface.co/models?filter={{cookiecutter.lowercase_modelname}} ] # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}Embeddings(tf.keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.type_vocab_size = config.type_vocab_size self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape: tf.TensorShape): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.type_vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = 
self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) super().build(input_shape) def call( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, past_key_values_length=0, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims( tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0 ) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}SelfAttention(tf.keras.layers.Layer): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = tf.keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] 
mixed_query_layer = self.query(inputs=hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TF{{cookiecutter.camelcase_modelname}}Model call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}SelfOutput(tf.keras.layers.Layer): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}Attention(tf.keras.layers.Layer): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(**kwargs) self.self_attention = TF{{cookiecutter.camelcase_modelname}}SelfAttention(config, name="self") self.dense_output = TF{{cookiecutter.camelcase_modelname}}SelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: self_outputs = self.self_attention( hidden_states=input_tensor, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=input_tensor, training=training ) # add attentions (possibly with past_key_value) if we output them outputs = (attention_output,) + self_outputs[1:] return outputs # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}Intermediate(tf.keras.layers.Layer): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states: 
tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}Output(tf.keras.layers.Layer): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}Layer(tf.keras.layers.Layer): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(**kwargs) self.attention = TF{{cookiecutter.camelcase_modelname}}Attention(config, name="attention") self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TF{{cookiecutter.camelcase_modelname}}Attention(config, name="crossattention") self.intermediate = TF{{cookiecutter.camelcase_modelname}}Intermediate(config, name="intermediate") self.bert_output = TF{{cookiecutter.camelcase_modelname}}Output(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_value: Tuple[tf.Tensor] | None, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers " "by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( 
input_tensor=attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value intermediate_output = self.intermediate(hidden_states=attention_output) layer_output = self.bert_output( hidden_states=intermediate_output, input_tensor=attention_output, training=training ) outputs = (layer_output,) + outputs # add attentions if we output them # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}Encoder(tf.keras.layers.Layer): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TF{{cookiecutter.camelcase_modelname}}Layer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_values: Tuple[Tuple[tf.Tensor]] | None, use_cache: Optional[bool], output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->{{cookiecutter.camelcase_modelname}} class 
TF{{cookiecutter.camelcase_modelname}}PredictionHeadTransform(tf.keras.layers.Layer): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}LMPredictionHead(tf.keras.layers.Layer): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, input_embeddings: tf.keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.hidden_size = config.hidden_size self.transform = TF{{cookiecutter.camelcase_modelname}}PredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.input_embeddings = input_embeddings def build(self, input_shape: tf.TensorShape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def get_output_embeddings(self) -> tf.keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}MLMHead(tf.keras.layers.Layer): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, input_embeddings: tf.keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TF{{cookiecutter.camelcase_modelname}}LMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores @keras_serializable class TF{{cookiecutter.camelcase_modelname}}MainLayer(tf.keras.layers.Layer): config_class = {{cookiecutter.camelcase_modelname}}Config def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, add_pooling_layer: bool = True, **kwargs): super().__init__(**kwargs) self.config = config 
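        # When `config.is_decoder` is True, the encoder stack built below applies a causal
        # attention mask and can cache key/value states between calls (see `call` for details).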
self.is_decoder = config.is_decoder self.embeddings = TF{{cookiecutter.camelcase_modelname}}Embeddings(config, name="embeddings") self.encoder = TF{{cookiecutter.camelcase_modelname}}Encoder(config, name="encoder") # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings def get_input_embeddings(self) -> tf.keras.layers.Layer: return self.embeddings # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: if not self.config.is_decoder: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape if past_key_values is None: past_key_values_length = 0 past_key_values = [None] * len(self.encoder.layer) else: past_key_values_length = shape_list(past_key_values[0][0])[-2] if attention_mask is None: attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, training=training, ) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
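        # Illustrative example (not part of the original code): for a non-decoder model, a padding
        # mask of [[1, 1, 0]] (shape [1, 3]) is reshaped below to shape [1, 1, 1, 3] and converted
        # into additive biases [[[[0.0, 0.0, -10000.0]]]], so masked positions are effectively
        # removed from the softmax over attention scores.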
attention_mask_shape = shape_list(attention_mask) mask_seq_length = seq_length + past_key_values_length # Copied from `modeling_tf_t5.py` # Provided a padding mask of dimensions [batch_size, mask_seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] if self.is_decoder: seq_ids = tf.range(mask_seq_length) causal_mask = tf.less_equal( tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)), seq_ids[None, :, None], ) causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype) extended_attention_mask = causal_mask * attention_mask[:, None, :] attention_mask_shape = shape_list(extended_attention_mask) extended_attention_mask = tf.reshape( extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2]) ) if past_key_values[0] is not None: # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length] extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] else: extended_attention_mask = tf.reshape( attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Copied from `modeling_tf_t5.py` with -1e9 -> -10000 if self.is_decoder and encoder_attention_mask is not None: # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] encoder_attention_mask = tf.cast( encoder_attention_mask, dtype=extended_attention_mask.dtype ) num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) if num_dims_encoder_attention_mask == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if num_dims_encoder_attention_mask == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask, # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2))) encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] if not return_dict: return ( sequence_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=sequence_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class TF{{cookiecutter.camelcase_modelname}}PreTrainedModel(TFPreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = {{cookiecutter.camelcase_modelname}}Config base_model_prefix = "{{cookiecutter.lowercase_modelname}}" {{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with (subclassing)[https://keras.io/guides/making_new_layers_and_models_via_subclassing/] then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`~{{cookiecutter.camelcase_modelname}}Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare {{cookiecutter.modelname}} Model transformer outputing raw hidden-states without any specific head on top.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class TF{{cookiecutter.camelcase_modelname}}Model(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}") @unpack_inputs @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation """ outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs @add_start_docstrings("""{{cookiecutter.modelname}} Model with a `language modeling` head on top. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING) class TF{{cookiecutter.camelcase_modelname}}ForMaskedLM(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if config.is_decoder: logger.warning( "If you want to use `TF{{cookiecutter.camelcase_modelname}}ForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}") self.mlm = TF{{cookiecutter.camelcase_modelname}}MLMHead(config, input_embeddings=self.{{cookiecutter.lowercase_modelname}}.embeddings, name="mlm___cls") def get_lm_head(self) -> tf.keras.layers.Layer: return self.mlm.predictions @unpack_inputs @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.mlm(sequence_output=sequence_output, training=training) loss = ( None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) ) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """{{cookiecutter.modelname}} Model with a `language modeling` head on top for CLM fine-tuning. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING ) class TF{{cookiecutter.camelcase_modelname}}ForCausalLM(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if not config.is_decoder: logger.warning("If you want to use `TF{{cookiecutter.camelcase_modelname}}ForCausalLM` as a standalone, add `is_decoder=True.`") self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}") self.mlm = TF{{cookiecutter.camelcase_modelname}}MLMHead(config, input_embeddings=self.{{cookiecutter.lowercase_modelname}}.embeddings, name="mlm___cls") def get_lm_head(self) -> tf.keras.layers.Layer: return self.mlm.predictions def prepare_inputs_for_generation(self, inputs, past_key_values=None, attention_mask=None, **model_kwargs): # cut decoder_input_ids if past is used if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) return { "input_ids": inputs, "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": model_kwargs["use_cache"], } @unpack_inputs @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. """ outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.mlm(sequence_output=sequence_output, training=training) loss = None if labels is not None: # shift labels to the left and cut last logit token shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels=labels, logits=shifted_logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) class TF{{cookiecutter.camelcase_modelname}}ClassificationHead(tf.keras.layers.Layer): """Head for sentence-level classification tasks.""" def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) self.out_proj = tf.keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj" ) if isinstance(config.hidden_act, str): self.classifier_act_fn = get_tf_activation(config.hidden_act) else: self.classifier_act_fn = config.hidden_act def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. 
to [CLS]) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.dense(inputs=hidden_states) hidden_states = self.classifier_act_fn(hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.out_proj(hidden_states) return hidden_states @add_start_docstrings( """{{cookiecutter.modelname}} Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}") self.classifier = TF{{cookiecutter.camelcase_modelname}}ClassificationHead(config, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) logits = self.classifier(hidden_states=outputs[0], training=training) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """{{cookiecutter.modelname}} Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}") self.sequence_summary = TFSequenceSummary( config, config.initializer_range, name="sequence_summary" ) self.classifier = tf.keras.layers.Dense( units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @unpack_inputs @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = ( tf.reshape(tensor=input_ids, shape=(-1, seq_length)) if input_ids is not None else None ) flat_attention_mask = ( tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None ) flat_token_type_ids = ( tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None ) flat_position_ids = ( tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None ) flat_inputs_embeds = ( tf.reshape( tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3]) ) if inputs_embeds is not None else None ) outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids=flat_input_ids, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, position_ids=flat_position_ids, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) logits = self.sequence_summary(inputs=outputs[0], training=training) logits = self.classifier(inputs=logits) reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """{{cookiecutter.modelname}} Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class TF{{cookiecutter.camelcase_modelname}}ForTokenClassification(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, TFTokenClassificationLoss): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @unpack_inputs @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(inputs=sequence_output, training=training) logits = self.classifier(inputs=sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """{{cookiecutter.modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.{{cookiecutter.lowercase_modelname}} = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="{{cookiecutter.lowercase_modelname}}") self.qa_outputs = tf.keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) @unpack_inputs @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, start_positions: np.ndarray | tf.Tensor | None = None, end_positions: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: r""" start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(inputs=sequence_output) start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1) start_logits = tf.squeeze(input=start_logits, axis=-1) end_logits = tf.squeeze(input=end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) {% else %} import random from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import get_tf_activation from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPastAndCrossAttentions, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput, ) # Public API from ...modeling_tf_utils import ( DUMMY_INPUTS, TFPreTrainedModel, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ContextManagers, logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}" _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config" _TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer" LARGE_NEGATIVE = -1e8 # Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=shifted_input_ids.dtype)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. 
""" bsz, tgt_len = input_ids_shape mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE mask_cond = tf.range(shape_list(mask)[-1]) mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TF{{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding(tf.keras.layers.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): super().__init__(num_embeddings, embedding_dim, **kwargs) def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0): """Input is expected to be of size [bsz x seqlen].""" seq_len = input_shape[1] position_ids = tf.range(seq_len, delta=1, name="range") position_ids += past_key_values_length return super().call(tf.cast(position_ids, dtype=tf.int32)) class TF{{cookiecutter.camelcase_modelname}}Attention(tf.keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = tf.keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.is_decoder = is_decoder self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training=False, ) -> Tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = 
self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}", ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}", ) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}", ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}", ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value class TF{{cookiecutter.camelcase_modelname}}EncoderLayer(tf.keras.layers.Layer): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TF{{cookiecutter.camelcase_modelname}}Attention( self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, 
name="self_attn" ) self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.dropout = tf.keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1") self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training=False): """ Args: hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`tf.Tensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size *(encoder_attention_heads,)* """ residual = hidden_states hidden_states, self_attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask ) tf.debugging.assert_equal( shape_list(hidden_states), shape_list(residual), message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) return hidden_states, self_attn_weights class TF{{cookiecutter.camelcase_modelname}}DecoderLayer(tf.keras.layers.Layer): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TF{{cookiecutter.camelcase_modelname}}Attention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, name="self_attn", is_decoder=True, ) self.dropout = tf.keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.encoder_attn = TF{{cookiecutter.camelcase_modelname}}Attention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, name="encoder_attn", is_decoder=True, ) self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1") self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") def call( self, hidden_states, attention_mask: tf.Tensor | None = None, encoder_hidden_states: tf.Tensor | None = None, encoder_attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, cross_attn_layer_head_mask: tf.Tensor | None = None, past_key_value: Tuple[tf.Tensor] | None = None, training=False, ) -> Tuple[tf.Tensor, 
tf.Tensor, Tuple[Tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`tf.Tensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. encoder_hidden_states (`tf.Tensor`): cross attention input to the layer of shape *(batch, seq_len, embed_dim)* encoder_attention_mask (`tf.Tensor`): encoder attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size *(decoder_attention_heads,)* cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. *(decoder_attention_heads,)* past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states """ residual = hidden_states # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) return ( hidden_states, self_attn_weights, cross_attn_weights, present_key_value, ) class TF{{cookiecutter.camelcase_modelname}}PreTrainedModel(TFPreTrainedModel): config_class = {{cookiecutter.camelcase_modelname}}Config base_model_prefix = "model" {{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with (subclassing)[https://keras.io/guides/making_new_layers_and_models_via_subclassing/] then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`~{{cookiecutter.camelcase_modelname}}Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) {{cookiecutter.camelcase_modelname}} uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. 
decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): will be made by default and ignore pad tokens. It is not recommended to set this for most use cases. head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tf.FloatTensor`, *optional*): hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape `(batch_size, sequence_length, hidden_size)` is a sequence of past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @keras_serializable class TF{{cookiecutter.camelcase_modelname}}Encoder(tf.keras.layers.Layer): config_class = {{cookiecutter.camelcase_modelname}}Config """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`TF{{cookiecutter.camelcase_modelname}}EncoderLayer`]. 
Args: config: {{cookiecutter.camelcase_modelname}}Config """ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs): super().__init__(**kwargs) self.config = config self.dropout = tf.keras.layers.Dropout(config.dropout) self.layerdrop = config.encoder_layerdrop self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 self.embed_tokens = embed_tokens self.embed_positions = TF{{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, name="embed_positions", ) self.layers = [TF{{cookiecutter.camelcase_modelname}}EncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)] self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens @unpack_inputs def call( self, input_ids=None, inputs_embeds=None, attention_mask=None, head_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): """ Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, `optional): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: # if `self.embed_tokens.load_weight_prefix` is set, runs the embedding operation with the correct name # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope` # is used with a name ending in `/`, that name replaces the current name scope. # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0) context = [] if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout(hidden_states, training=training) # check attention mask and invert if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), message=f"The head_mask should be specified for {len(self.layers)} layers, but it is for {shape_list(head_mask)[0]}.", ) # encoder layers for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): # skip the layer continue hidden_states, attn = encoder_layer( hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, ) if output_attentions: all_attentions += (attn,) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) @keras_serializable class TF{{cookiecutter.camelcase_modelname}}Decoder(tf.keras.layers.Layer): config_class = {{cookiecutter.camelcase_modelname}}Config """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`TF{{cookiecutter.camelcase_modelname}}DecoderLayer`] Args: config: {{cookiecutter.camelcase_modelname}}Config embed_tokens: output embedding """ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id self.embed_tokens = embed_tokens self.layerdrop = config.decoder_layerdrop self.embed_positions = TF{{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, name="embed_positions", ) self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 self.layers = [TF{{cookiecutter.camelcase_modelname}}DecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") self.dropout = tf.keras.layers.Dropout(config.dropout) def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens @unpack_inputs def call( self, input_ids=None, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") past_key_values_length = ( shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 ) # embed positions positions = self.embed_positions(input_shape, past_key_values_length) if inputs_embeds is None: # if `self.embed_tokens.load_weight_prefix` is set, runs the embedding operation with the correct name # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope` # is used with a name ending in `/`, that name replaces the current name scope. 
# (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0) context = [] if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds attention_mask, combined_attention_mask = self.compute_combined_attns_mask( input_ids, attention_mask, input_shape, past_key_values_length ) if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) hidden_states = self.layernorm_embedding(hidden_states + positions) hidden_states = self.dropout(hidden_states, training=training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), message=f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for {shape_list(attn_mask)[0]}.", ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer( hidden_states, attention_mask=combined_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value, ) if use_cache: present_key_values += (present_key_value,) if output_attentions: all_self_attns += (layer_self_attn,) if encoder_hidden_states is not None: all_cross_attns += (layer_cross_attn,) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns else: return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attns, ) @tf.function def compute_combined_attns_mask(self, input_ids, attention_mask, input_shape, past_key_values_length): # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) else: combined_attention_mask = _expand_mask( tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1] ) if attention_mask is None and input_ids is not None and input_shape[-1] > 1: 
attention_mask = tf.cast( tf.math.not_equal(input_ids, self.config.pad_token_id), input_ids.dtype ) attention_mask = tf.concat( [ tf.ones((input_shape[0], past_key_values_length), dtype=attention_mask.dtype), attention_mask, ], axis=-1, ) else: attention_mask = tf.ones((input_shape[0], input_shape[1] + past_key_values_length)) return attention_mask, combined_attention_mask @keras_serializable class TF{{cookiecutter.camelcase_modelname}}MainLayer(tf.keras.layers.Layer): config_class = {{cookiecutter.camelcase_modelname}}Config def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(**kwargs) self.config = config self.shared = tf.keras.layers.Embedding( input_dim=config.vocab_size, output_dim=config.d_model, embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=self.config.init_std), name="model.shared" ) # Additional attribute to specify the expected name scope of the layer (for loading/storing weights) self.shared.load_weight_prefix = "model.shared" self.encoder = TF{{cookiecutter.camelcase_modelname}}Encoder(config, self.shared, name="encoder") self.decoder = TF{{cookiecutter.camelcase_modelname}}Decoder(config, self.shared, name="decoder") def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared @unpack_inputs def call( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs ): if decoder_input_ids is None and decoder_inputs_embeds is None: use_cache = False if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput): encoder_outputs = TFBaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False elif not return_dict and not isinstance(encoder_outputs, tuple): encoder_outputs = encoder_outputs.to_tuple() decoder_outputs = self.decoder( decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return decoder_outputs + encoder_outputs return TFSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, 
decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The bare {{cookiecutter.uppercase_modelname}} Model outputting raw hidden-states without any specific head on top.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class TF{{cookiecutter.camelcase_modelname}}Model(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="model") def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder @unpack_inputs @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs ): outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs # Copied from transformers.models.bart.modeling_tf_bart.BiasLayer class BiasLayer(tf.keras.layers.Layer): """ Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, so all weights have to be registered in a layer. """ def __init__(self, shape, initializer, trainable, name, **kwargs): super().__init__(name=name, **kwargs) # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) def call(self, x): return x + self.bias @add_start_docstrings( "The {{cookiecutter.uppercase_modelname}} Model with a language modeling head. 
Can be used for summarization.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration(TF{{cookiecutter.camelcase_modelname}}PreTrainedModel): _keys_to_ignore_on_load_unexpected = [ r"model.encoder.embed_tokens.weight", r"model.decoder.embed_tokens.weight", ] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TF{{cookiecutter.camelcase_modelname}}MainLayer(config, name="model") self.use_cache = config.use_cache # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) def get_decoder(self): return self.model.decoder def get_encoder(self): return self.model.encoder def get_bias(self): return {"final_logits_bias": self.bias_layer.bias} def set_bias(self, value): # Replaces the existing layers containing bias for correct (de)serialization. vocab_size = value["final_logits_bias"].shape[-1] self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False ) self.bias_layer.bias.assign(value["final_logits_bias"]) def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) @unpack_inputs @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs: Optional[TFBaseModelOutput] = None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): """ Returns: Examples: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> import tensorflow as tf >>> mname = '{{cookiecutter.checkpoint_identifier}}' >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained(mname) >>> TXT = "My friends are <mask> but they eat too many carbs." 
>>> model = TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained(mname) >>> batch = tokenizer([TXT], return_tensors='tf') >>> logits = model(inputs=batch.input_ids).logits >>> probs = tf.nn.softmax(logits[0]) >>> # probs[5] is associated with the mask token ```""" if labels is not None: use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training ) lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return TFSeq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, # index 1 of d outputs decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs cross_attentions=outputs.cross_attentions, # index 4 of d outputs encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out encoder_attentions=outputs.encoder_attentions, # 2 of e out ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # needs to be passed to make Keras.layer.__call__ happy "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def hf_compute_loss(self, labels, logits): """CrossEntropyLoss that ignores pad tokens""" loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE, ) melted_labels = tf.reshape(labels, (-1,)) active_loss = tf.not_equal(melted_labels, self.config.pad_token_id) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(melted_labels, active_loss) return loss_fn(labels, reduced_logits) {% endif -%}
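Addendum (illustration only, not part of the generated template file): the seq2seq head above builds `decoder_input_ids` for training by right-shifting `labels` with `shift_tokens_right` whenever labels are passed without explicit decoder inputs. The sketch below reproduces that same shift with plain TensorFlow ops so the behaviour is easy to see in isolation; the function name, toy token ids, and the pad/start ids are invented for the example.

import tensorflow as tf


def toy_shift_tokens_right(labels: tf.Tensor, pad_token_id: int, decoder_start_token_id: int) -> tf.Tensor:
    # Prepend the decoder start token and drop the last label position.
    batch_size = labels.shape[0]
    start_tokens = tf.fill((batch_size, 1), tf.constant(decoder_start_token_id, dtype=labels.dtype))
    shifted = tf.concat([start_tokens, labels[:, :-1]], axis=-1)
    # Map the -100 ignore-index used by the loss back to the pad token id.
    return tf.where(
        shifted == -100,
        tf.fill(tf.shape(shifted), tf.constant(pad_token_id, dtype=labels.dtype)),
        shifted,
    )


if __name__ == "__main__":
    labels = tf.constant([[42, 43, 44, -100]], dtype=tf.int32)
    print(toy_shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2).numpy())
    # expected: [[ 2 42 43 44]]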
135870
46.960113
221
py
transformers
transformers-main/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule, OptionalDependencyNotAvailable, is_tokenizers_available {%- if "TensorFlow" in cookiecutter.generate_tensorflow_pytorch_and_flax %} from ...utils import is_tf_available {% endif %} {%- if "PyTorch" in cookiecutter.generate_tensorflow_pytorch_and_flax %} from ...utils import is_torch_available {% endif %} {%- if "Flax" in cookiecutter.generate_tensorflow_pytorch_and_flax %} from ...utils import is_flax_available {% endif %} _import_structure = { "configuration_{{cookiecutter.lowercase_modelname}}": ["{{cookiecutter.uppercase_modelname}}_PRETRAINED_CONFIG_ARCHIVE_MAP", "{{cookiecutter.camelcase_modelname}}Config"], "tokenization_{{cookiecutter.lowercase_modelname}}": ["{{cookiecutter.camelcase_modelname}}Tokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_{{cookiecutter.lowercase_modelname}}_fast"] = ["{{cookiecutter.camelcase_modelname}}TokenizerFast"] {%- if "PyTorch" in cookiecutter.generate_tensorflow_pytorch_and_flax %} {% if cookiecutter.is_encoder_decoder_model == "False" %} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_{{cookiecutter.lowercase_modelname}}"] = [ "{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST", "{{cookiecutter.camelcase_modelname}}ForMaskedLM", "{{cookiecutter.camelcase_modelname}}ForCausalLM", "{{cookiecutter.camelcase_modelname}}ForMultipleChoice", "{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "{{cookiecutter.camelcase_modelname}}ForTokenClassification", "{{cookiecutter.camelcase_modelname}}Layer", "{{cookiecutter.camelcase_modelname}}Model", "{{cookiecutter.camelcase_modelname}}PreTrainedModel", "load_tf_weights_in_{{cookiecutter.lowercase_modelname}}", ] {% else %} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_{{cookiecutter.lowercase_modelname}}"] = [ "{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST", "{{cookiecutter.camelcase_modelname}}ForConditionalGeneration", "{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "{{cookiecutter.camelcase_modelname}}ForCausalLM", "{{cookiecutter.camelcase_modelname}}Model", "{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] {% endif %} {% endif %} {%- if "TensorFlow" in cookiecutter.generate_tensorflow_pytorch_and_flax %} {% if cookiecutter.is_encoder_decoder_model == "False" %} try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: 
_import_structure["modeling_tf_{{cookiecutter.lowercase_modelname}}"] = [ "TF_{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST", "TF{{cookiecutter.camelcase_modelname}}ForMaskedLM", "TF{{cookiecutter.camelcase_modelname}}ForCausalLM", "TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice", "TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "TF{{cookiecutter.camelcase_modelname}}ForTokenClassification", "TF{{cookiecutter.camelcase_modelname}}Layer", "TF{{cookiecutter.camelcase_modelname}}Model", "TF{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] {% else %} try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_{{cookiecutter.lowercase_modelname}}"] = [ "TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration", "TF{{cookiecutter.camelcase_modelname}}Model", "TF{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] {% endif %} {% endif %} {%- if "Flax" in cookiecutter.generate_tensorflow_pytorch_and_flax %} {% if cookiecutter.is_encoder_decoder_model == "False" %} try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_{{cookiecutter.lowercase_modelname}}"] = [ "Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM", "Flax{{cookiecutter.camelcase_modelname}}ForCausalLM", "Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice", "Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification", "Flax{{cookiecutter.camelcase_modelname}}Layer", "Flax{{cookiecutter.camelcase_modelname}}Model", "Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] {% else %} try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_{{cookiecutter.lowercase_modelname}}"] = [ "Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration", "Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "Flax{{cookiecutter.camelcase_modelname}}Model", "Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] {% endif %} {% endif %} if TYPE_CHECKING: from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.uppercase_modelname}}_PRETRAINED_CONFIG_ARCHIVE_MAP, {{cookiecutter.camelcase_modelname}}Config from .tokenization_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Tokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_{{cookiecutter.lowercase_modelname}}_fast import {{cookiecutter.camelcase_modelname}}TokenizerFast {%- if "PyTorch" in cookiecutter.generate_tensorflow_pytorch_and_flax %} {% if cookiecutter.is_encoder_decoder_model == "False" %} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_{{cookiecutter.lowercase_modelname}} import ( {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST, {{cookiecutter.camelcase_modelname}}ForMaskedLM, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}ForMultipleChoice, 
{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}ForTokenClassification, {{cookiecutter.camelcase_modelname}}Layer, {{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}PreTrainedModel, load_tf_weights_in_{{cookiecutter.lowercase_modelname}}, ) {% else %} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_{{cookiecutter.lowercase_modelname}} import ( {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% endif %} {% endif %} {%- if "TensorFlow" in cookiecutter.generate_tensorflow_pytorch_and_flax %} {% if cookiecutter.is_encoder_decoder_model == "False" %} try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_{{cookiecutter.lowercase_modelname}} import ( TF_{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST, TF{{cookiecutter.camelcase_modelname}}ForMaskedLM, TF{{cookiecutter.camelcase_modelname}}ForCausalLM, TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice, TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification, TF{{cookiecutter.camelcase_modelname}}ForTokenClassification, TF{{cookiecutter.camelcase_modelname}}Layer, TF{{cookiecutter.camelcase_modelname}}Model, TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% else %} try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_{{cookiecutter.lowercase_modelname}} import ( TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, TF{{cookiecutter.camelcase_modelname}}Model, TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% endif %} {% endif %} {%- if "Flax" in cookiecutter.generate_tensorflow_pytorch_and_flax %} {% if cookiecutter.is_encoder_decoder_model == "False" %} try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_{{cookiecutter.lowercase_modelname}} import ( Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM, Flax{{cookiecutter.camelcase_modelname}}ForCausalLM, Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice, Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification, Flax{{cookiecutter.camelcase_modelname}}Layer, Flax{{cookiecutter.camelcase_modelname}}Model, Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% else %} try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_{{cookiecutter.lowercase_modelname}} import ( Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, Flax{{cookiecutter.camelcase_modelname}}Model, 
Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% endif %} {% endif %} else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
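Addendum (illustration only, not part of the generated template file): once the cookiecutter placeholders are rendered, `_import_structure` maps each submodule of the new model package to the public names it defines, and the final `sys.modules[__name__] = _LazyModule(...)` line defers importing those submodules (and the heavy torch/TF/Flax dependencies they pull in) until one of the names is first accessed. The sketch below shows roughly what the rendered mapping boils down to for a hypothetical model called "BrandNewBert" with only the PyTorch backend selected; all names are invented for the example.

# Hypothetical rendering of _import_structure for a model named "BrandNewBert"
# (PyTorch backend only); illustrative values, not real transformers classes.
example_import_structure = {
    "configuration_brand_new_bert": [
        "BRAND_NEW_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BrandNewBertConfig",
    ],
    "tokenization_brand_new_bert": ["BrandNewBertTokenizer"],
    "modeling_brand_new_bert": [
        "BrandNewBertModel",
        "BrandNewBertForMaskedLM",
        "BrandNewBertPreTrainedModel",
    ],
}

# With the lazy module in place, a line such as
#   from transformers.models.brand_new_bert import BrandNewBertConfig
# only loads configuration_brand_new_bert; the modeling file (and torch itself)
# is not imported until a modeling class is actually requested.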
12270
41.756098
178
py
transformers
transformers-main/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_flax_{{cookiecutter.lowercase_modelname}}.py
# coding=utf-8 # Copyright 2022 {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax {{cookiecutter.modelname}} model. """ {% if cookiecutter.is_encoder_decoder_model == "False" %} from typing import Callable, Optional, Tuple import numpy as np import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, unfreeze, freeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.traverse_util import flatten_dict, unflatten_dict from flax.linen.attention import dot_product_attention_weights from jax import lax from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_flax_outputs import ( FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxBaseModelOutputWithPoolingAndCrossAttentions, FlaxCausalLMOutput, FlaxCausalLMOutputWithCrossAttentions, FlaxMaskedLMOutput, FlaxMultipleChoiceModelOutput, FlaxQuestionAnsweringModelOutput, FlaxSequenceClassifierOutput, FlaxTokenClassifierOutput, ) from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring, ) from ...utils import logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}" _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config" _TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer" {{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`~{{cookiecutter.uppercase_modelname}}Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. 
            Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs).

            This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
            specified all the computation will be performed with the given `dtype`.

            **Note that this only specifies the dtype of the computation and does not influence the dtype of model
            parameters.**

            If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
            [`~FlaxPreTrainedModel.to_bf16`].
"""

{{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`numpy.ndarray` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in
            `[0, 1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
            `[0, config.max_position_embeddings - 1]`.
        head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
            Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
""" remat = nn_partitioning.remat # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Embeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.word_embeddings = nn.Embed( self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.position_embeddings = nn.Embed( self.config.max_position_embeddings, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.token_type_embeddings = nn.Embed( self.config.type_vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True): # Embed inputs_embeds = self.word_embeddings(input_ids.astype("i4")) position_embeds = self.position_embeddings(position_ids.astype("i4")) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4")) # Sum all embeddings hidden_states = inputs_embeds + token_type_embeddings + position_embeds # Layer Norm hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}SelfAttention(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config causal: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.head_dim = self.config.hidden_size // self.config.num_attention_heads if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError( "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads`\ : {self.config.num_attention_heads}" ) self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) if self.causal: self.causal_mask = make_causal_mask( jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" ) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,)) @nn.compact # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. 
This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states: Optional[jnp.array] = None, init_cache: bool = False, deterministic=True, output_attentions: bool = False, ): # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] # get query proj query_states = self.query(hidden_states) # get key, value proj if is_cross_attention: # cross_attentions key_states = self.key(key_value_states) value_states = self.value(key_value_states) else: # self_attention key_states = self.key(hidden_states) value_states = self.value(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. 
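        # `_concatenate_to_cache` below writes the new key/value slices into the mutable "cache" variable
        # collection and widens `attention_mask` so that the current query positions can attend to every
        # position cached so far.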
if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) # Mask heads if we want to if layer_head_mask is not None: attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}SelfOutput(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertAttention with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Attention(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.self = Flax{{cookiecutter.camelcase_modelname}}SelfAttention(self.config, dtype=self.dtype) self.output = Flax{{cookiecutter.camelcase_modelname}}SelfOutput(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool = False, ): # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length) # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length) attn_outputs = self.self( hidden_states, attention_mask, layer_head_mask=layer_head_mask, key_value_states=key_value_states, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return 
outputs # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Intermediate(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Output(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_output, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + attention_output) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Layer(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = Flax{{cookiecutter.camelcase_modelname}}Attention(self.config, dtype=self.dtype) self.intermediate = Flax{{cookiecutter.camelcase_modelname}}Intermediate(self.config, dtype=self.dtype) self.output = Flax{{cookiecutter.camelcase_modelname}}Output(self.config, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = Flax{{cookiecutter.camelcase_modelname}}Attention(self.config, causal=False, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, ): # Self Attention attention_outputs = self.attention( hidden_states, attention_mask, layer_head_mask=layer_head_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = attention_outputs[0] # Cross-Attention Block if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_states, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) if encoder_hidden_states is not None: outputs += (cross_attention_outputs[1],) return outputs # Copied from 
transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}LayerCollection(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: Flax{{cookiecutter.camelcase_modelname}}CheckpointLayer = remat(Flax{{cookiecutter.camelcase_modelname}}Layer, static_argnums=(5, 6, 7)) self.layers = [ Flax{{cookiecutter.camelcase_modelname}}CheckpointLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] else: self.layers = [ Flax{{cookiecutter.camelcase_modelname}}Layer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # Check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.shape[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for \ {head_mask.shape[0]}." ) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, head_mask[i] if head_mask is not None else None, encoder_hidden_states, encoder_attention_mask, init_cache, deterministic, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states,) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Encoder(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.layer = Flax{{cookiecutter.camelcase_modelname}}LayerCollection(self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return self.layer( hidden_states, attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPooler with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Pooler(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__(self, hidden_states): cls_hidden_state = hidden_states[:, 0] cls_hidden_state = self.dense(cls_hidden_state) return nn.tanh(cls_hidden_state) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPredictionHeadTransform with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}PredictionHeadTransform(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype) self.activation = ACT2FN[self.config.hidden_act] self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return self.LayerNorm(hidden_states) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLMPredictionHead with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}LMPredictionHead(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.transform = Flax{{cookiecutter.camelcase_modelname}}PredictionHeadTransform(self.config, dtype=self.dtype) self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False) self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.transform(hidden_states) if shared_embedding is not None: hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) hidden_states += self.bias return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOnlyMLMHead with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}OnlyMLMHead(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 def setup(self): self.predictions = Flax{{cookiecutter.camelcase_modelname}}LMPredictionHead(self.config, dtype=self.dtype) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOnlyNSPHead with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}OnlyNSPHead(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): self.seq_relationship = nn.Dense(2, dtype=self.dtype) def __call__(self, pooled_output): return self.seq_relationship(pooled_output) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainingHeads with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}PreTrainingHeads(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 
def setup(self): self.predictions = Flax{{cookiecutter.camelcase_modelname}}LMPredictionHead(self.config, dtype=self.dtype) self.seq_relationship = nn.Dense(2, dtype=self.dtype) def __call__(self, hidden_states, pooled_output, shared_embedding=None): prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = {{cookiecutter.camelcase_modelname}}Config base_model_prefix = "{{cookiecutter.lowercase_modelname}}" module_class: nn.Module = None def __init__( self, config: {{cookiecutter.camelcase_modelname}}Config, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, gradient_checkpointing: bool = False, **kwargs ): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.init_weights with Bert->{{cookiecutter.camelcase_modelname}} def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") token_type_ids = jnp.zeros_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states, encoder_attention_mask, return_dict=False, ) else: module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False ) random_params = module_init_outputs["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.init_cache with Bert->{{cookiecutter.camelcase_modelname}} def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. 
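
            Example (illustrative sketch; the class name and checkpoint below are the same template placeholders
            used elsewhere in this file):

            ```python
            >>> model = Flax{{cookiecutter.camelcase_modelname}}ForCausalLM.from_pretrained("{{cookiecutter.checkpoint_identifier}}")
            >>> past_key_values = model.init_cache(batch_size=1, max_length=64)
            ```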
""" # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length)) attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"]) @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.__call__ with Bert->{{cookiecutter.camelcase_modelname}} def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, past_key_values: dict = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # init input tensors if not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} if self.config.add_cross_attention: # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed # down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be # changed by FlaxBertAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, mutable=mutable, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past_key_values = outputs outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] else: outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, ) return outputs # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertModule with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Module(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation add_pooling_layer: bool = True gradient_checkpointing: bool = False def setup(self): self.embeddings = Flax{{cookiecutter.camelcase_modelname}}Embeddings(self.config, dtype=self.dtype) self.encoder = Flax{{cookiecutter.camelcase_modelname}}Encoder(self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.pooler = Flax{{cookiecutter.camelcase_modelname}}Pooler(self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # make sure `token_type_ids` is correctly initialized when not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) # make sure `position_ids` is correctly initialized when not passed if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) hidden_states = self.embeddings( input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic ) outputs = self.encoder( hidden_states, attention_mask, head_mask=head_mask, deterministic=deterministic, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] pooled = self.pooler(hidden_states) if self.add_pooling_layer else None if not 
return_dict: # if pooled is None, don't return it if pooled is None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) add_start_docstrings( "The bare {{cookiecutter.camelcase_modelname}} Model transformer outputting raw hidden-states without any specific head on top.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}Model(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}Module class Flax{{cookiecutter.camelcase_modelname}}ForMaskedLMModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = Flax{{cookiecutter.camelcase_modelname}}OnlyMLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.{{cookiecutter.lowercase_modelname}}.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None # Compute the prediction scores logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings("""{{cookiecutter.camelcase_modelname}} Model with a `language modeling` head on top for MLM training. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING) class Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForMaskedLMModule append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC ) class Flax{{cookiecutter.camelcase_modelname}}ForCausalLMModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = Flax{{cookiecutter.camelcase_modelname}}OnlyMLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.{{cookiecutter.lowercase_modelname}}.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None # Compute the prediction scores logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings("""{{cookiecutter.camelcase_modelname}} Model with a `language modeling` head on top for CLM training. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING) class Flax{{cookiecutter.camelcase_modelname}}ForCausalLM(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForCausalLMModule append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForCausalLM, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC ) class Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense( self.config.num_labels, dtype=self.dtype, ) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) if not return_dict: return (logits,) + outputs[2:] return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxSequenceClassifierOutput, _CONFIG_FOR_DOC, ) class Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoiceModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput( logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoiceModule overwrite_call_docstring( Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice, {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC ) class Flax{{cookiecutter.camelcase_modelname}}ForTokenClassificationModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForTokenClassificationModule append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC ) class Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnsweringModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.qa_outputs(hidden_states) start_logits, end_logits = logits.split(self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnsweringModule append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) class Flax{{cookiecutter.camelcase_modelname}}ForCausalLMModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = Flax{{cookiecutter.camelcase_modelname}}OnlyMLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, token_type_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.{{cookiecutter.lowercase_modelname}}.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None # Compute the prediction scores logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g for autoregressive tasks. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForCausalLM(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jnp.DeviceArray] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyway. 
# Thus, we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForCausalLM, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) {# encoder_decoder #} {% else %} import math import random from functools import partial from typing import Callable, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, unfreeze, freeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from jax.random import PRNGKey from ...utils import add_start_docstrings, replace_return_docstrings from ...modeling_flax_outputs import ( FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput, FlaxSeq2SeqModelOutput, FlaxSeq2SeqQuestionAnsweringModelOutput, FlaxSeq2SeqSequenceClassifierOutput, ) from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}" _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config" _TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer" {{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`~{{cookiecutter.camelcase_modelname}}Config`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" {{cookiecutter.uppercase_modelname}}_ENCODE_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ {{cookiecutter.uppercase_modelname}}_DECODE_INPUTS_DOCSTRING = r""" Args: decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. encoder_outputs (`tuple(tuple(jnp.ndarray)`): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. 
past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: """ Shift input ids one token to the right. """ shifted_input_ids = jnp.roll(input_ids, 1, axis=-1) shifted_input_ids = shifted_input_ids.at[(..., 0)].set(decoder_start_token_id) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids class Flax{{cookiecutter.camelcase_modelname}}Attention(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config embed_dim: int num_heads: int dropout: float = 0.0 causal: bool = False bias: bool = True dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads assert ( self.head_dim * self.num_heads == self.embed_dim ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." dense = partial( nn.Dense, self.embed_dim, use_bias=self.bias, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense() self.out_proj = dense() self.dropout_layer = nn.Dropout(rate=self.dropout) if self.causal: self.causal_mask = make_causal_mask( jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" ) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. 
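# Descriptive note on the cache check that follows: on the very first call the "cache"
# collection does not exist yet, so `has_variable` returns False and only the
# zero-initialised key/value buffers below are allocated; on subsequent decoding steps
# the variables already exist and the update branch runs.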
is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray] = None, attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] # get query proj query_states = self.q_proj(hidden_states) # get key, value proj if is_cross_attention: # cross_attentions key_states = self.k_proj(key_value_states) value_states = self.v_proj(key_value_states) else: # self_attention key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. 
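# Positions the mask allows keep a bias of 0.0, while masked positions receive the
# dtype's most negative finite value, so after the softmax they contribute
# (effectively) zero attention weight.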
if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class Flax{{cookiecutter.camelcase_modelname}}EncoderLayer(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = Flax{{cookiecutter.camelcase_modelname}}Attention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.encoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype ) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.fc1 = nn.Dense( self.config.encoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.fc2 = nn.Dense( self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype) def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, output_attentions: bool = True, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: residual = hidden_states hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class Flax{{cookiecutter.camelcase_modelname}}EncoderLayerCollection(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ Flax{{cookiecutter.camelcase_modelname}}EncoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.encoder_layers) ] self.layerdrop = self.config.encoder_layerdrop def __call__( self, hidden_states, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for encoder_layer in self.layers: if output_hidden_states: all_hidden_states = 
all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not deterministic and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: layer_outputs = encoder_layer( hidden_states, attention_mask, output_attentions, deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class Flax{{cookiecutter.camelcase_modelname}}DecoderLayer(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = Flax{{cookiecutter.camelcase_modelname}}Attention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, causal=True, dtype=self.dtype, ) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype) self.encoder_attn = Flax{{cookiecutter.camelcase_modelname}}Attention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype) self.fc1 = nn.Dense( self.config.decoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.fc2 = nn.Dense( self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype) def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = True, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache ) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, ) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states 
hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs class Flax{{cookiecutter.camelcase_modelname}}DecoderLayerCollection(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ Flax{{cookiecutter.camelcase_modelname}}DecoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.decoder_layers) ] self.layerdrop = self.config.decoder_layerdrop def __call__( self, hidden_states, attention_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not deterministic and (dropout_probability < self.layerdrop): layer_outputs = (None, None, None) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, deterministic=deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions] if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) class Flax{{cookiecutter.camelcase_modelname}}ClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" config: {{cookiecutter.camelcase_modelname}}Config inner_dim: int num_classes: int pooler_dropout: float dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense( self.inner_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.dropout = nn.Dropout(rate=self.pooler_dropout) self.out_proj = nn.Dense( self.num_classes, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) def __call__(self, hidden_states: jnp.ndarray, deterministic: bool): hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.dense(hidden_states) hidden_states = jnp.tanh(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.out_proj(hidden_states) return hidden_states class Flax{{cookiecutter.camelcase_modelname}}Encoder(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation embed_tokens: Optional[nn.Embed] = None def setup(self): self.dropout_layer = 
nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_source_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0 if self.embed_tokens is None: self.embed_tokens = nn.Embed( self.config.vocab_size, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) # {{cookiecutter.camelcase_modelname}} is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models don't have this hack self.offset = 2 self.embed_positions = nn.Embed( self.config.max_position_embeddings + self.offset, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.layers = Flax{{cookiecutter.camelcase_modelname}}EncoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(position_ids + self.offset) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return outputs return FlaxBaseModelOutput( last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class Flax{{cookiecutter.camelcase_modelname}}Decoder(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation embed_tokens: Optional[nn.Embed] = None def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_target_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0 if self.embed_tokens is None: self.embed_tokens = nn.Embed( self.config.vocab_size, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) # {{cookiecutter.camelcase_modelname}} is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. 
Other models don't have this hack self.offset = 2 self.embed_positions = nn.Embed( self.config.max_position_embeddings + self.offset, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.layers = Flax{{cookiecutter.camelcase_modelname}}DecoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale # embed positions positions = self.embed_positions(position_ids + self.offset) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return outputs return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) class Flax{{cookiecutter.camelcase_modelname}}Module(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.shared = nn.Embed( self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.encoder = Flax{{cookiecutter.camelcase_modelname}}Encoder(self.config, dtype=self.dtype, embed_tokens=self.shared) self.decoder = Flax{{cookiecutter.camelcase_modelname}}Decoder(self.config, dtype=self.dtype, embed_tokens=self.shared) def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, 
encoder_attentions=encoder_outputs.attentions, ) class Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel(FlaxPreTrainedModel): config_class = {{cookiecutter.camelcase_modelname}}Config base_model_prefix: str = "model" module_class: nn.Module = None def __init__( self, config: {{cookiecutter.camelcase_modelname}}Config, input_shape: Tuple[int] = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") # make sure initialization pass will work for Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id) attention_mask = jnp.ones_like(input_ids) decoder_input_ids = input_ids decoder_attention_mask = jnp.ones_like(input_ids) batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, )["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length, encoder_outputs): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
""" # init input variables to retrieve cache decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to( jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape ) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) init_variables = self.module.init( jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward, # we only need to call the decoder to init the cache ) return unfreeze(init_variables["cache"]) @add_start_docstrings({{cookiecutter.uppercase_modelname}}_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class={{cookiecutter.camelcase_modelname}}Config) def encode( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> text = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer(text, max_length=1024, return_tensors='np') >>> encoder_outputs = model.encode(**inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, position_ids, **kwargs) return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward, ) @add_start_docstrings({{cookiecutter.uppercase_modelname}}_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class={{cookiecutter.camelcase_modelname}}Config) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> text = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer(text, max_length=1024, return_tensors='np') >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> last_decoder_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by Flax{{cookiecutter.camelcase_modelname}}Attention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past = outputs outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past = outputs outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs def __call__( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, decoder_input_ids: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) # prepare decoder inputs if decoder_input_ids is None: decoder_input_ids = shift_tokens_right( input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id ) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) if decoder_position_ids is None: batch_size, sequence_length = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, ) @add_start_docstrings( "The bare {{cookiecutter.camelcase_modelname}} Model transformer outputting raw hidden-states without any specific head on top.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}Model(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation module_class = Flax{{cookiecutter.camelcase_modelname}}Module append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}Model, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC ) class Flax{{cookiecutter.camelcase_modelname}}ForConditionalGenerationModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros def setup(self): self.model = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype) self.lm_head = nn.Dense( self.model.shared.num_embeddings, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings)) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.model.variables["params"]["shared"]["embedding"] lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) lm_logits += self.final_logits_bias.astype(self.dtype) if not return_dict: output = (lm_logits,) + outputs[1:] return output return FlaxSeq2SeqLMOutput( logits=lm_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( "The {{cookiecutter.uppercase_modelname}} Model with a language modeling head. Can be used for summarization.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING ) class Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGenerationModule dtype: jnp.dtype = jnp.float32 @add_start_docstrings({{cookiecutter.uppercase_modelname}}_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class={{cookiecutter.camelcase_modelname}}Config) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, deterministic: bool = True, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> text = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer(text, max_length=1024, return_tensors='np') >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by Flax{{cookiecutter.camelcase_modelname}}Attention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() outputs = decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = module.model.variables["params"]["shared"]["embedding"] lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: lm_logits = module.lm_head(hidden_states) lm_logits += module.final_logits_bias.astype(self.dtype) return lm_logits, outputs outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, rngs=rngs, mutable=mutable, method=_decoder_forward, ) if past_key_values is None: lm_logits, decoder_outputs = outputs else: (lm_logits, decoder_outputs), past = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions( logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, ) else: outputs = (lm_logits,) + decoder_outputs[1:] # add updated cache to model output if past_key_values is not None and return_dict: outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is 
not None and not return_dict: outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs def prepare_inputs_for_generation( self, decoder_input_ids, max_length, attention_mask: Optional[jnp.DeviceArray] = None, decoder_attention_mask: Optional[jnp.DeviceArray] = None, encoder_outputs=None, **kwargs ): # initializing the cache batch_size, seq_length = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyways. # Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if decoder_attention_mask is not None: position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "encoder_attention_mask": attention_mask, "decoder_attention_mask": extended_attention_mask, "decoder_position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1 return model_kwargs FLAX_{{cookiecutter.uppercase_modelname}}_CONDITIONAL_GENERATION_DOCSTRING = """ Returns: Summarization example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='np') >>> # Generate Summary >>> summary_ids = model.generate(inputs['input_ids']).sequences >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)) ``` Mask filling example: ```python >>> import jax >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> TXT = "My friends are <mask> but they eat too many carbs." 
>>> input_ids = tokenizer([TXT], return_tensors='np')['input_ids'] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0) >>> values, predictions = jax.lax.top_k(probs, k=1) >>> tokenizer.decode(predictions).split() ``` """ overwrite_call_docstring( Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING + FLAX_{{cookiecutter.uppercase_modelname}}_CONDITIONAL_GENERATION_DOCSTRING ) append_replace_return_docstrings( Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) class Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 num_labels: Optional[int] = None def setup(self): self.model = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype) self.classification_head = Flax{{cookiecutter.camelcase_modelname}}ClassificationHead( config=self.config, inner_dim=self.config.d_model, num_classes=self.num_labels if self.num_labels is not None else self.config.num_labels, pooler_dropout=self.config.classifier_dropout, ) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) hidden_states = outputs[0] # last hidden state eos_mask = jnp.where(input_ids == self.config.eos_token_id, 1, 0) # The first condition is necessary to overcome jax._src.errors.ConcretizationTypeError during JIT compilation if type(eos_mask) != jax.interpreters.partial_eval.DynamicJaxprTracer: if len(jnp.unique(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") if any(eos_mask.sum(1) == 0): raise ValueError("There are missing <eos> tokens in input_ids") # Ensure to keep 1 only for the last <eos> token for each example eos_mask_noised = eos_mask + jnp.arange(eos_mask.shape[1]) * 1e-6 eos_mask = jnp.where(eos_mask_noised == eos_mask_noised.max(1).reshape(-1, 1), 1, 0) sentence_representation = jnp.einsum("ijk, ij -> ijk", hidden_states, eos_mask).sum(1) logits = self.classification_head(sentence_representation, deterministic=deterministic) if not return_dict: output = (logits,) + outputs[1:] return output return FlaxSeq2SeqSequenceClassifierOutput( logits=logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. 
for GLUE tasks. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule dtype = jnp.float32 append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqSequenceClassifierOutput, _CONFIG_FOR_DOC, ) class Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnsweringModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 num_labels = 2 def setup(self): self.model = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype) self.qa_outputs = nn.Dense( self.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = jnp.split(logits, logits.shape[-1], axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: output = (start_logits, end_logits) + outputs[1:] return output return FlaxSeq2SeqQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ {{cookiecutter.uppercase_modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnsweringModule dtype = jnp.float32 append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) {% endif -%}
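# Illustrative usage sketch (kept as a comment, not part of the generated module): it
# mirrors the summarization example from the conditional-generation docstring above and
# assumes the cookiecutter placeholders have been rendered to a concrete checkpoint.
#
#   from transformers import (
#       {{cookiecutter.camelcase_modelname}}Tokenizer,
#       Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration,
#   )
#
#   tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
#   model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
#
#   inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors='np')
#   # `generate` drives `prepare_inputs_for_generation` -> `init_cache` -> `decode` under the hood
#   summary_ids = model.generate(inputs['input_ids']).sequences
#   print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))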
139390
42.008639
213
py
transformers
transformers-main/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py
# coding=utf-8 # Copyright 2022 {{cookiecutter.authors}} The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch {{cookiecutter.modelname}} model. """ {% if cookiecutter.is_encoder_decoder_model == "False" %} import math import os import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from typing import Optional, Tuple, Union from ...activations import ACT2FN from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel, SequenceSummary from ...pytorch_utils import ( apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from ...utils import logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}" _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config" {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [ "{{cookiecutter.checkpoint_identifier}}", # See all {{cookiecutter.modelname}} models at https://huggingface.co/models?filter={{cookiecutter.lowercase_modelname}} ] def load_tf_weights_in_{{cookiecutter.lowercase_modelname}}(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model # Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Embeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # 
Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}SelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
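        # A short summary of the key/value selection below (comments added for readability; they mirror the
        # branches that follow):
        #   1. cross-attention with a cached `past_key_value`: reuse the cached encoder keys/values unchanged,
        #   2. cross-attention without a cache: project `encoder_hidden_states` into keys/values,
        #   3. decoder self-attention with a cache: project the current `hidden_states` and concatenate the
        #      result with the cached keys/values along the sequence dimension,
        #   4. plain (encoder) self-attention: project the current `hidden_states` only.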
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in {{cookiecutter.camelcase_modelname}}Model forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}SelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Attention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = {{cookiecutter.camelcase_modelname}}SelfAttention(config, position_embedding_type=position_embedding_type) self.output = {{cookiecutter.camelcase_modelname}}SelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Intermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput 
with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Output(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Layer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = {{cookiecutter.camelcase_modelname}}Attention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added" self.crossattention = {{cookiecutter.camelcase_modelname}}Attention(config, position_embedding_type="absolute") self.intermediate = {{cookiecutter.camelcase_modelname}}Intermediate(config) self.output = {{cookiecutter.camelcase_modelname}}Output(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: assert hasattr( self, "crossattention" ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`" # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, 
attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Encoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([{{cookiecutter.camelcase_modelname}}Layer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): if self.gradient_checkpointing and self.training and use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}PredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied 
from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}LMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = {{cookiecutter.camelcase_modelname}}PredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}OnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = {{cookiecutter.camelcase_modelname}}LMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class {{cookiecutter.camelcase_modelname}}PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = {{cookiecutter.camelcase_modelname}}Config load_tf_weights = load_tf_weights_in_{{cookiecutter.lowercase_modelname}} base_model_prefix = "{{cookiecutter.lowercase_modelname}}" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, {{cookiecutter.camelcase_modelname}}Encoder): module.gradient_checkpointing = value {{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`~{{cookiecutter.camelcase_modelname}}Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare {{cookiecutter.modelname}} Model transformer outputting raw hidden-states without any specific head on top.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelname}}PreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config): super().__init__(config) self.config = config self.embeddings = {{cookiecutter.camelcase_modelname}}Embeddings(config) self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
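        Example (a minimal usage sketch; it reuses the template's tokenizer class and checkpoint placeholder, so the
        concrete names depend on how the cookiecutter is filled in):

        ```python
        >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}Model
        >>> import torch

        >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained("{{cookiecutter.checkpoint_identifier}}")
        >>> model = {{cookiecutter.camelcase_modelname}}Model.from_pretrained("{{cookiecutter.checkpoint_identifier}}")

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state  # shape (batch_size, sequence_length, hidden_size)
        ```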
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=sequence_output, 
past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings("""{{cookiecutter.modelname}} Model with a `language modeling` head on top. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING) class {{cookiecutter.camelcase_modelname}}ForMaskedLM({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `{{cookiecutter.camelcase_modelname}}ForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
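        Example (a minimal masked-language-modeling sketch; the tokenizer class, the checkpoint placeholder and the
        `[MASK]` token string are assumptions that depend on the concrete model this template is filled in with):

        ```python
        >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForMaskedLM
        >>> import torch

        >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained("{{cookiecutter.checkpoint_identifier}}")
        >>> model = {{cookiecutter.camelcase_modelname}}ForMaskedLM.from_pretrained("{{cookiecutter.checkpoint_identifier}}")

        >>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
        >>> # labels are the token ids of the unmasked sentence and must have the same length as `input_ids`;
        >>> # positions set to -100 are ignored by the loss
        >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]

        >>> outputs = model(**inputs, labels=labels)
        >>> loss = outputs.loss
        >>> logits = outputs.logits  # shape (batch_size, sequence_length, config.vocab_size)
        ```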
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] # add a dummy token assert self.config.pad_token_id is not None, "The PAD token should be defined for generation" attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full( (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device ) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {"input_ids": input_ids, "attention_mask": attention_mask} @add_start_docstrings( """{{cookiecutter.modelname}} Model with a `language modeling` head on top for CLM fine-tuning. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING ) class {{cookiecutter.camelcase_modelname}}ForCausalLM({{cookiecutter.camelcase_modelname}}PreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `{{cookiecutter.camelcase_modelname}}ForCausalLM` as a standalone, add `is_decoder=True.`") self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. 
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`).
Returns: Example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}Config >>> import torch >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> config = {{cookiecutter.camelcase_modelname}}Config.from_pretrained("{{cookiecutter.checkpoint_identifier}}") >>> config.is_decoder = True >>> model = {{cookiecutter.camelcase_modelname}}ForCausalLM.from_pretrained('{{cookiecutter.checkpoint_identifier}}', config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past is used if past_key_values is not None: input_ids = input_ids[:, -1:] return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],) return reordered_past class {{cookiecutter.camelcase_modelname}}ClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. 
to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """{{cookiecutter.modelname}} Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}ForSequenceClassification({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.classifier = {{cookiecutter.camelcase_modelname}}ClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """{{cookiecutter.modelname}} Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
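    A minimal multiple-choice usage sketch (the tokenizer class and checkpoint name below are the template's
    placeholders, not a guarantee that such a checkpoint exists): each choice is encoded against the prompt and the
    classifier scores the `num_choices` dimension.

    ```python
    >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForMultipleChoice
    >>> import torch

    >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained("{{cookiecutter.checkpoint_identifier}}")
    >>> model = {{cookiecutter.camelcase_modelname}}ForMultipleChoice.from_pretrained("{{cookiecutter.checkpoint_identifier}}")

    >>> prompt = "In Italy, pizza is commonly served in slices."
    >>> choice0 = "It is eaten with a fork and a knife."
    >>> choice1 = "It is eaten while held in the hand."

    >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True)
    >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()})  # batch of size 1, two choices
    >>> logits = outputs.logits  # shape (batch_size, num_choices)
    ```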
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}ForMultipleChoice({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): super().__init__(config) self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] pooled_output = self.sequence_summary(sequence_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """{{cookiecutter.modelname}} Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}ForTokenClassification({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """{{cookiecutter.modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}ForQuestionAnswering({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) {% else %} import math import copy from typing import Optional, Tuple, List, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import 
ACT2FN from ...utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, CausalLMOutputWithCrossAttentions ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}" _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config" {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [ "{{cookiecutter.checkpoint_identifier}}", # See all {{cookiecutter.modelname}} models at https://huggingface.co/models?filter={{cookiecutter.lowercase_modelname}} ] def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min) mask_cond = torch.arange(mask.size(-1)) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask( mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None ): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) class {{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. 
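    The positions looked up in `forward` start at `past_key_values_length`, so absolute positions stay consistent
    during cached decoding: for example, with `past_key_values_length=2` and an input of shape `(batch_size, 3)`,
    the embedded positions are `[2, 3, 4]`.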
""" def __init__(self, num_embeddings: int, embedding_dim: int): super().__init__(num_embeddings, embedding_dim) def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0): """`input_ids_shape` is expected to be [bsz x seqlen].""" bsz, seq_len = input_ids_shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ) return super().forward(positions) class {{cookiecutter.camelcase_modelname}}Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})." self.scaling = self.head_dim ** -0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class {{cookiecutter.camelcase_modelname}}EncoderLayer(nn.Module): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): super().__init__() self.embed_dim = config.d_model self.self_attn = {{cookiecutter.camelcase_modelname}}Attention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, ): """ Args: hidden_states 
(`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`torch.FloatTensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size *(config.encoder_attention_heads,)*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class {{cookiecutter.camelcase_modelname}}DecoderLayer(nn.Module): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): super().__init__() self.embed_dim = config.d_model self.self_attn = {{cookiecutter.camelcase_modelname}}Attention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = {{cookiecutter.camelcase_modelname}}Attention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`torch.FloatTensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. 
encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape *(batch, seq_len, embed_dim)* encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size *(encoder_attention_heads,)*. cross_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size *(decoder_attention_heads,)*. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs # Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}ClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = 
nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class {{cookiecutter.camelcase_modelname}}PreTrainedModel(PreTrainedModel): config_class = {{cookiecutter.camelcase_modelname}}Config base_model_prefix = "model" supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, ({{cookiecutter.camelcase_modelname}}Decoder, {{cookiecutter.camelcase_modelname}}Encoder)): module.gradient_checkpointing = value {{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`~{{cookiecutter.camelcase_modelname}}Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ {{cookiecutter.uppercase_modelname}}_GENERATION_EXAMPLE = r""" Summarization example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> model = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt') >>> # Generate Summary >>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5) >>> print(tokenizer.decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)) ``` """ {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Provide for translation and summarization training. By default, the model will create this tensor by shifting the `input_ids` to the right, following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_{{cookiecutter.lowercase_modelname}}._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. 
If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ {{cookiecutter.uppercase_modelname}}_STANDALONE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`ProphetNetTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class {{cookiecutter.camelcase_modelname}}Encoder({{cookiecutter.camelcase_modelname}}PreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`{{cookiecutter.camelcase_modelname}}EncoderLayer`]. 
Args: config: {{cookiecutter.camelcase_modelname}}Config embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) self.embed_positions = {{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding( config.max_position_embeddings, embed_dim, ) self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}EncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.randn([]) if self.training and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class {{cookiecutter.camelcase_modelname}}Decoder({{cookiecutter.camelcase_modelname}}PreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`{{cookiecutter.camelcase_modelname}}DecoderLayer`] Args: config: {{cookiecutter.camelcase_modelname}}Config embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = {{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, ) self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}DecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length ).to(self.device) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) # embed positions positions = self.embed_positions(input_shape, past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # decoder layers if self.gradient_checkpointing and self.training and use_cache: logger.warning("`use_cache = True` is incompatible with gradient checkpointing`. Setting `use_cache = False`...") use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: assert attn_mask.size()[0] == ( len(self.layers) ), f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." 
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            dropout_probability = torch.rand([])
            if self.training and (dropout_probability < self.layerdrop):
                continue

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, use_cache)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    cross_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )
            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )


@add_start_docstrings(
    "The bare {{cookiecutter.modelname}} Model outputting raw hidden-states without any specific head on top.",
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelname}}PreTrainedModel):
    def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config):
        super().__init__(config)

        padding_idx, vocab_size = config.pad_token_id, config.vocab_size
        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)

        self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config, self.shared)
        self.decoder = {{cookiecutter.camelcase_modelname}}Decoder(config, self.shared)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, value):
        self.shared = value
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        encoder_outputs=None,
past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The {{cookiecutter.modelname}} Model with a language modeling head. 
Can be used for summarization.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING ) class {{cookiecutter.camelcase_modelname}}ForConditionalGeneration({{cookiecutter.camelcase_modelname}}PreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"final_logits_bias", r"encoder\.version", r"decoder\.version", r"lm_head\.weight", ] def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config): super().__init__(config) self.model = {{cookiecutter.camelcase_modelname}}Model(config) self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens) self._resize_final_logits_bias(new_num_tokens) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings({{cookiecutter.uppercase_modelname}}_GENERATION_EXAMPLE) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Conditional generation example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> TXT = "My friends are <mask> but they eat too many carbs." 
>>> model = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids'] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}ForSequenceClassification({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs): super().__init__(config, **kwargs) self.model = {{cookiecutter.camelcase_modelname}}Model(config) self.classification_head = {{cookiecutter.camelcase_modelname}}ClassificationHead( config.d_model, config.d_model, config.num_labels, config.classifier_dropout, ) self.model._init_weights(self.classification_head.dense) self.model._init_weights(self.classification_head.out_proj) @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] # last hidden state eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ :, -1, : ] logits = self.classification_head(sentence_representation) loss = None if labels is not None: if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, 
logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ {{cookiecutter.modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class {{cookiecutter.camelcase_modelname}}ForQuestionAnswering({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.model = {{cookiecutter.camelcase_modelname}}Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.model._init_weights(self.qa_outputs) @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, start_positions=None, end_positions=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = ( start_logits, end_logits, ) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}DecoderWrapper({{cookiecutter.camelcase_modelname}}PreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. 
""" def __init__(self, config): super().__init__(config) self.decoder = {{cookiecutter.camelcase_modelname}}Decoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}ForCausalLM({{cookiecutter.camelcase_modelname}}PreTrainedModel): def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = {{cookiecutter.camelcase_modelname}}DecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
- 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: Example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForCausalLM >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('facebook/bart-large') >>> model = {{cookiecutter.camelcase_modelname}}ForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: input_ids = input_ids[:, -1:] # first step, decoder_cached_states are empty return { "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past {% endif -%}
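A minimal, self-contained sketch of the span-loss computation performed by the question-answering head earlier in this file (tensor shapes and gold positions below are made up for illustration; `qa_outputs` itself is not reproduced):

```python
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len = 2, 8
# stand-ins for `self.qa_outputs(sequence_output).split(1, dim=-1)` after squeezing
start_logits = torch.randn(batch_size, seq_len)
end_logits = torch.randn(batch_size, seq_len)
start_positions = torch.tensor([1, 12])  # 12 is deliberately out of range
end_positions = torch.tensor([3, 15])

# positions outside the model input are clamped to `ignored_index` and then skipped by the loss
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
print(total_loss)
```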
154,748
45.345912
390
py
transformers
transformers-main/templates/adding_a_missing_tokenization_test/cookiecutter-template-{{cookiecutter.modelname}}/test_tokenization_{{cookiecutter.lowercase_modelname}}.py
# coding=utf-8 # Copyright 2022 {{cookiecutter.authors}}. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the {{cookiecutter.modelname}} tokenizer. """ import unittest {% if cookiecutter.has_slow_class == "True" and cookiecutter.has_fast_class == "True" -%} from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}TokenizerFast {% elif cookiecutter.has_slow_class == "True" -%} from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer {% elif cookiecutter.has_fast_class == "True" -%} from transformers import {{cookiecutter.camelcase_modelname}}TokenizerFast {% endif -%} {% if cookiecutter.has_fast_class == "True" and cookiecutter.slow_tokenizer_use_sentencepiece == "True" -%} from transformers.testing_utils import require_sentencepiece, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_sentencepiece @require_tokenizers {% elif cookiecutter.slow_tokenizer_use_sentencepiece == "True" -%} from transformers.testing_utils import require_sentencepiece from ...test_tokenization_common import TokenizerTesterMixin @require_sentencepiece {% elif cookiecutter.has_fast_class == "True" -%} from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers {% else -%} from ...test_tokenization_common import TokenizerTesterMixin {% endif -%} class {{cookiecutter.camelcase_modelname}}TokenizationTest(TokenizerTesterMixin, unittest.TestCase): {% if cookiecutter.has_slow_class == "True" -%} tokenizer_class = {{cookiecutter.camelcase_modelname}}Tokenizer test_slow_tokenizer = True {% else -%} tokenizer_class = None test_slow_tokenizer = False {% endif -%} {% if cookiecutter.has_fast_class == "True" -%} rust_tokenizer_class = {{cookiecutter.camelcase_modelname}}TokenizerFast test_rust_tokenizer = True {% else -%} rust_tokenizer_class = None test_rust_tokenizer = False {% endif -%} {% if cookiecutter.slow_tokenizer_use_sentencepiece == "True" -%} test_sentencepiece = True {% endif -%} # TODO: Check in `TokenizerTesterMixin` if other attributes need to be changed def setUp(self): super().setUp() raise NotImplementedError( "Here you have to implement the saving of a toy tokenizer in " "`self.tmpdirname`." ) # TODO: add tests with hard-coded target values
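The `setUp` above is deliberately left unimplemented. As a purely illustrative sketch of what it asks for — mirroring the BERT tokenization tests and assuming a WordPiece-style, vocab-file based tokenizer (substitute the model's own tokenizer class) — a toy tokenizer could be materialised like this:

```python
import os
import tempfile

from transformers import BertTokenizer

tmpdirname = tempfile.mkdtemp()
# a tiny, hand-written vocabulary is enough for unit tests
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "hello", "world", "##ing", "runn"]
vocab_file = os.path.join(tmpdirname, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
    vocab_writer.write("".join(token + "\n" for token in vocab_tokens))

tokenizer = BertTokenizer(vocab_file)
tokenizer.save_pretrained(tmpdirname)  # the tester mixin can then reload it from `tmpdirname`
print(tokenizer.tokenize("hello running world"))
```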
3,032
37.884615
121
py
transformers
transformers-main/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning a 🤗 Transformers model on {{cookiecutter.example_name}}. """ # You can also adapt this script on your own {{cookiecutter.example_name}} task. Pointers for this are left as comments. {%- if cookiecutter.with_trainer == "True" %} import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional, List import datasets import torch from datasets import load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, {{cookiecutter.model_class}}, AutoTokenizer, DataCollatorWithPadding, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import send_example_telemetry logger = logging.getLogger(__name__) {%- if cookiecutter.can_train_from_scratch == "True" %} # You should update this to your particular problem to have better documentation of `model_type` MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": "The model checkpoint for weights initialization." "Don't set if you want to train a model from scratch." }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) {%- elif cookiecutter.can_train_from_scratch == "False" %} @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
""" model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." }, ) {% endif %} @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to predict the label on (a text file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." }, ) def __post_init__(self): if ( self.dataset_name is None and self.train_file is None and self.validation_file is None and self.test_file is None ): raise ValueError("Need either a dataset name or a training/validation/test file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." if self.test_file is not None: extension = self.test_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`test_file` should be a csv, a json or a txt file." 
def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_{{cookiecutter.example_shortcut}}", model_args, data_args) # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] if extension == "txt": extension = "text" raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. {%- if cookiecutter.can_train_from_scratch == "True" %} config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." 
) if model_args.model_name_or_path: model = {{cookiecutter.model_class}}.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) else: logger.info("Training new model from scratch") model = {{cookiecutter.model_class}}.from_config(config) model.resize_token_embeddings(len(tokenizer)) {%- elif cookiecutter.can_train_from_scratch == "False" %} config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, # num_labels=num_labels, Uncomment if you have a certain number of labels finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) {% endif %} # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names elif training_args.do_predict: column_names = raw_datasets["test"].column_names text_column_name = "text" if "text" in column_names else column_names[0] def tokenize_function(examples): return tokenizer(examples[text_column_name], padding="max_length", truncation=True) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: # Select Sample from Dataset train_dataset = train_dataset.select(range(data_args.max_train_samples)) # tokenize train dataset in batch with training_args.main_process_first(desc="train dataset map tokenization"): train_dataset = train_dataset.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] # Selecting samples from dataset if data_args.max_eval_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) # tokenize validation dataset with training_args.main_process_first(desc="validation dataset map tokenization"): eval_dataset = eval_dataset.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_predict: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test"] # Selecting samples from dataset if data_args.max_predict_samples is not None: predict_dataset = 
predict_dataset.select(range(data_args.max_predict_samples)) # tokenize predict dataset with training_args.main_process_first(desc="prediction dataset map tokenization"): predict_dataset = predict_dataset.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, ) # Data collator data_collator=default_data_collator if not training_args.fp16 else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: {%- if cookiecutter.can_train_from_scratch == "False" %} if last_checkpoint is not None: checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None {%- elif cookiecutter.can_train_from_scratch == "True" %} if last_checkpoint is not None: checkpoint = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None {% endif %} train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Prediction if training_args.do_predict: logger.info("*** Predict ***") predictions, labels, metrics = trainer.predict(predict_dataset) max_predict_samples = data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) # write custom code for saving predictions according to task def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main() {%- elif cookiecutter.with_trainer == "False" %} import argparse import logging import math import os import random import datasets from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from tqdm.auto import tqdm import transformers from accelerate import Accelerator from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AdamW, AutoConfig, {{cookiecutter.model_class}}, AutoTokenizer, DataCollatorWithPadding, PretrainedConfig, SchedulerType, default_data_collator, get_scheduler, set_seed, ) from transformers.utils import send_example_telemetry logger = logging.getLogger(__name__) {%- if cookiecutter.can_train_from_scratch == "True" %} # You should update this to your particular problem to have better documentation of `model_type` MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) MODEL_TYPES = 
tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) {% endif %} def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help= "The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_lengh` is passed." ), ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." 
) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") {%- if cookiecutter.can_train_from_scratch == "True" %} parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) {% endif %} args = parser.parse_args() # Sanity checks if args.task_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a task name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args def main(): args = parse_args() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_{{cookiecutter.example_shortcut}", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) else: data_files = {} if args.train_file is not None: data_files["train"] = args.train_file if args.validation_file is not None: data_files["validation"] = args.validation_file extension = args.train_file.split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. 
# Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. {%- if cookiecutter.can_train_from_scratch == "True" %} if model_args.config_name: config = AutoConfig.from_pretrained(args.model_name_or_path) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(args.model_name_or_path) else: config = CONFIG_MAPPING[args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, use_fast=not args.use_slow_tokenizer) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, use_fast=not args.use_slow_tokenizer) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: model = {{cookiecutter.model_class}}.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, ) else: logger.info("Training new model from scratch") model = {{cookiecutter.model_class}}.from_config(config) model.resize_token_embeddings(len(tokenizer)) {%- elif cookiecutter.can_train_from_scratch == "False" %} config = AutoConfig.from_pretrained( args.config_name if model_args.config_name else args.model_name_or_path, # num_labels=num_labels, Uncomment if you have a certain number of labels finetuning_task=data_args.task_name, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if model_args.tokenizer_name else args.model_name_or_path, use_fast=not args.use_slow_tokenizer, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, ) {% endif %} # Preprocessing the datasets. # First we tokenize all the texts. column_names = raw_datasets["train"].column_names text_column_name = "text" if "text" in column_names else column_names[0] padding = "max_length" if args.pad_to_max_length else False def tokenize_function(examples): result = tokenizer(examples[text_column_name], padding=padding, max_length=args.max_length, truncation=True) if "label" in examples: result["labels"] = examples["label"] return result processed_datasets = raw_datasets.map( preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # DataLoaders creation: if args.pad_to_max_length: # If padding was already done ot max length, we use the default data collator that will just convert everything # to tensors. data_collator = default_data_collator else: # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be # shorter in multiprocess) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # TODO Get the proper metric function # metric = load_metric(xxx) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if completed_steps >= args.max_train_steps: break model.eval() for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) metric.add_batch( predictions=accelerator.gather(predictions), references=accelerator.gather(batch["labels"]), ) eval_metric = metric.compute() logger.info(f"epoch {epoch}: {eval_metric}") if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if __name__ == "__main__": main() {% endif %}
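Both branches of this example template start from the same argument-parsing idea. Here is a small, self-contained sketch of the `HfArgumentParser` pattern used in the Trainer branch's `main()`; the dataclass fields, checkpoint name, and file paths are illustrative only:

```python
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser, TrainingArguments


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(default=None)


@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None)


# one flat command line (or a single JSON file) is split back into the three dataclasses
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses(
    args=["--model_name_or_path", "bert-base-uncased", "--train_file", "train.csv", "--output_dir", "out"]
)
print(model_args.model_name_or_path, data_args.train_file, training_args.output_dir)
```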
37,654
40.56181
130
py
transformers
transformers-main/scripts/stale.py
# Copyright 2021 The HuggingFace Team, the AllenNLP library authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script to close stale issue. Taken in part from the AllenNLP repository. https://github.com/allenai/allennlp. """ from datetime import datetime as dt import os from github import Github LABELS_TO_EXEMPT = [ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", ] def main(): g = Github(os.environ["GITHUB_TOKEN"]) repo = g.get_repo("huggingface/transformers") open_issues = repo.get_issues(state="open") for issue in open_issues: comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True) last_comment = comments[0] if len(comments) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state="closed") elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) " "are likely to be ignored." ) if __name__ == "__main__": main()
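For reference, a hedged sketch of how the closing rule above could be factored into a pure function for offline testing; the `FakeIssue` stand-in, the label subset, and the thresholds are illustrative, whereas the real script operates on PyGithub issue objects:

```python
from dataclasses import dataclass, field
from datetime import datetime, timedelta

LABELS_TO_EXEMPT = ["wip", "feature request"]  # subset, for illustration


@dataclass
class FakeIssue:
    created_at: datetime
    updated_at: datetime
    labels: list = field(default_factory=list)
    last_comment_by_bot: bool = False


def should_close(issue, now=None):
    """Mirror of the first branch above: bot commented last, >7 days idle, >=30 days old, no exempt label."""
    now = now or datetime.utcnow()
    exempt = any(label.lower() in LABELS_TO_EXEMPT for label in issue.labels)
    return (
        issue.last_comment_by_bot
        and (now - issue.updated_at).days > 7
        and (now - issue.created_at).days >= 30
        and not exempt
    )


issue = FakeIssue(
    created_at=datetime.utcnow() - timedelta(days=60),
    updated_at=datetime.utcnow() - timedelta(days=10),
    last_comment_by_bot=True,
)
print(should_close(issue))  # True
```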
2,654
38.626866
115
py
transformers
transformers-main/scripts/check_tokenizers.py
from collections import Counter import datasets import transformers from transformers.convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from transformers.utils import logging logging.set_verbosity_info() TOKENIZER_CLASSES = { name: (getattr(transformers, name), getattr(transformers, name + "Fast")) for name in SLOW_TO_FAST_CONVERTERS } dataset = datasets.load_dataset("xnli", split="test+validation") total = 0 perfect = 0 imperfect = 0 wrong = 0 def check_diff(spm_diff, tok_diff, slow, fast): if spm_diff == list(reversed(tok_diff)): # AAA -> AA+A vs A+AA case. return True elif len(spm_diff) == len(tok_diff) and fast.decode(spm_diff) == fast.decode(tok_diff): # Second order OK # Barrich -> Barr + ich vs Bar + rich return True spm_reencoded = slow.encode(slow.decode(spm_diff)) tok_reencoded = fast.encode(fast.decode(spm_diff)) if spm_reencoded != spm_diff and spm_reencoded == tok_reencoded: # Type 3 error. # Snehagatha -> # Sne, h, aga, th, a # Sne, ha, gat, ha # Encoding the wrong with sp does not even recover what spm gave us # It fits tokenizer however... return True return False def check_LTR_mark(line, idx, fast): enc = fast.encode_plus(line)[0] offsets = enc.offsets curr, prev = offsets[idx], offsets[idx - 1] if curr is not None and line[curr[0] : curr[1]] == "\u200f": return True if prev is not None and line[prev[0] : prev[1]] == "\u200f": return True def check_details(line, spm_ids, tok_ids, slow, fast): # Encoding can be the same with same result AAA -> A + AA vs AA + A # We can check that we use at least exactly the same number of tokens. for i, (spm_id, tok_id) in enumerate(zip(spm_ids, tok_ids)): if spm_id != tok_id: break first = i for i, (spm_id, tok_id) in enumerate(zip(reversed(spm_ids), reversed(tok_ids))): if spm_id != tok_id: break last = len(spm_ids) - i spm_diff = spm_ids[first:last] tok_diff = tok_ids[first:last] if check_diff(spm_diff, tok_diff, slow, fast): return True if check_LTR_mark(line, first, fast): return True if last - first > 5: # We might have twice a single problem, attempt to subdivide the disjointed tokens into smaller problems spms = Counter(spm_ids[first:last]) toks = Counter(tok_ids[first:last]) removable_tokens = {spm_ for (spm_, si) in spms.items() if toks.get(spm_, 0) == si} min_width = 3 for i in range(last - first - min_width): if all(spm_ids[first + i + j] in removable_tokens for j in range(min_width)): possible_matches = [ k for k in range(last - first - min_width) if tok_ids[first + k : first + k + min_width] == spm_ids[first + i : first + i + min_width] ] for j in possible_matches: if check_diff(spm_ids[first : first + i], tok_ids[first : first + j], sp, tok) and check_details( line, spm_ids[first + i : last], tok_ids[first + j : last], slow, fast, ): return True print(f"Spm: {[fast.decode([spm_ids[i]]) for i in range(first, last)]}") try: print(f"Tok: {[fast.decode([tok_ids[i]]) for i in range(first, last)]}") except Exception: pass ok_start = fast.decode(spm_ids[:first]) ok_end = fast.decode(spm_ids[last:]) wrong = fast.decode(spm_ids[first:last]) print() print(wrong) return False def test_string(slow, fast, text): global perfect global imperfect global wrong global total slow_ids = slow.encode(text) fast_ids = fast.encode(text) skip_assert = False total += 1 if slow_ids != fast_ids: if check_details(text, slow_ids, fast_ids, slow, fast): skip_assert = True imperfect += 1 else: wrong += 1 else: perfect += 1 if total % 10000 == 0: print(f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})") if skip_assert: return 
assert ( slow_ids == fast_ids ), f"line {text} : \n\n{slow_ids}\n{fast_ids}\n\n{slow.tokenize(text)}\n{fast.tokenize(text)}" def test_tokenizer(slow, fast): global batch_total for i in range(len(dataset)): # premise, all languages for text in dataset[i]["premise"].values(): test_string(slow, fast, text) # hypothesis, all languages for text in dataset[i]["hypothesis"]["translation"]: test_string(slow, fast, text) if __name__ == "__main__": for name, (slow_class, fast_class) in TOKENIZER_CLASSES.items(): checkpoint_names = list(slow_class.max_model_input_sizes.keys()) for checkpoint in checkpoint_names: imperfect = 0 perfect = 0 wrong = 0 total = 0 print(f"========================== Checking {name}: {checkpoint} ==========================") slow = slow_class.from_pretrained(checkpoint, force_download=True) fast = fast_class.from_pretrained(checkpoint, force_download=True) test_tokenizer(slow, fast) print(f"Accuracy {perfect * 100 / total:.2f}")
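A stand-alone illustration of the slow/fast comparison that `test_string` above performs at scale: encode the same text with both tokenizers and diff the ids. The checkpoint is only an example (any entry of `TOKENIZER_CLASSES` would do) and this particular pair needs `sentencepiece` installed:

```python
from transformers import XLNetTokenizer, XLNetTokenizerFast

checkpoint = "xlnet-base-cased"
slow = XLNetTokenizer.from_pretrained(checkpoint)
fast = XLNetTokenizerFast.from_pretrained(checkpoint)

text = "Hello, world! Tokenizers should agree."
slow_ids = slow.encode(text)
fast_ids = fast.encode(text)
print(slow_ids == fast_ids)
print(slow.tokenize(text))
print(fast.tokenize(text))
```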
5,547
31.635294
117
py
transformers
transformers-main/scripts/distributed/torch-distributed-gpu-test.py
#!/usr/bin/env python # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def printflock(*msgs): """solves multi-process interleaved print problem""" with open(__file__, "r") as fh: fcntl.flock(fh, fcntl.LOCK_EX) try: print(*msgs) finally: fcntl.flock(fh, fcntl.LOCK_UN) local_rank = int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) device = torch.device("cuda", local_rank) hostname = socket.gethostname() gpu = f"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank rank = dist.get_rank() world_size = dist.get_world_size() printflock(f"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(f"{gpu} is broken") raise
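As a local sanity sketch (not a replacement for launching the script above with `torch.distributed.run`), the same init / all_reduce / barrier flow can be exercised in a single process with the "gloo" backend, so it also runs on a machine without GPUs; the address and port values are arbitrary:

```python
import os

import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")

# single-process "cluster": rank 0 of world size 1
dist.init_process_group("gloo", rank=0, world_size=1)
tensor = torch.ones(1)
dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
dist.barrier()
print(f"all_reduce result: {tensor.item()} (world size {dist.get_world_size()})")
dist.destroy_process_group()
```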
3,009
31.365591
109
py
transformers
transformers-main/scripts/pegasus/build_test_sample_spm_no_bos.py
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script builds a small sample spm file tests/fixtures/test_sentencepiece_no_bos.model, with features needed by pegasus # 1. pip install sentencepiece # # 2. wget https://raw.githubusercontent.com/google/sentencepiece/master/data/botchan.txt # 3. build import sentencepiece as spm # pegasus: # 1. no bos # 2. eos_id is 1 # 3. unk_id is 2 # build a sample spm file accordingly spm.SentencePieceTrainer.train('--input=botchan.txt --model_prefix=test_sentencepiece_no_bos --bos_id=-1 --unk_id=2 --eos_id=1 --vocab_size=1000') # 4. now update the fixture # mv test_sentencepiece_no_bos.model ../../tests/fixtures/
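An optional follow-up check, assuming the training call above has produced `test_sentencepiece_no_bos.model` in the working directory: verify that the special-token ids match what pegasus expects.

```python
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.load("test_sentencepiece_no_bos.model")
print("bos:", sp.bos_id())            # expected -1 (disabled)
print("eos:", sp.eos_id())            # expected 1
print("unk:", sp.unk_id())            # expected 2
print("vocab size:", sp.get_piece_size())  # expected 1000
```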
1,252
35.852941
148
py
transformers
transformers-main/scripts/fsmt/gen-card-facebook-wmt19.py
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def write_model_card(model_card_dir, src_lang, tgt_lang): texts = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] scores = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } pair = f"{src_lang}-{tgt_lang}" readme = f""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR's WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) """ os.makedirs(model_card_dir, exist_ok=True) path = os.path.join(model_card_dir, "README.md") print(f"Generating {path}") with open(path, "w", encoding="utf-8") as f: f.write(readme) # make sure we are under the root of the project repo_dir = Path(__file__).resolve().parent.parent.parent model_cards_dir = repo_dir / "model_cards" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: base, src_lang, tgt_lang = model_name.split("-") model_card_dir = model_cards_dir / "facebook" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
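One detail worth calling out in the script above: the model card is built as an f-string, so every literal brace (the BibTeX entry, `{{2020}}`, and so on) has to be doubled. A tiny standalone reminder, with a made-up citation key:

```python
pair = "ru-en"
card_fragment = f"""pair: {pair}
bibtex: @inproceedings{{facebook_wmt19, year={{2020}}}}"""
print(card_fragment)
# pair: ru-en
# bibtex: @inproceedings{facebook_wmt19, year={2020}}
```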
5,476
32.193939
270
py
transformers
transformers-main/scripts/fsmt/gen-card-allenai-wmt19.py
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Usage: # ./gen-card-allenai-wmt19.py import os from pathlib import Path def write_model_card(model_card_dir, src_lang, tgt_lang, model_name): texts = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] scores = { "wmt19-de-en-6-6-base": [0, 38.37], "wmt19-de-en-6-6-big": [0, 39.90], } pair = f"{src_lang}-{tgt_lang}" readme = f""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - allenai license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt19 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). 2 models are available: * [wmt19-de-en-6-6-big](https://huggingface.co/allenai/wmt19-de-en-6-6-big) * [wmt19-de-en-6-6-base](https://huggingface.co/allenai/wmt19-de-en-6-6-base) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). 
## Eval results Here are the BLEU scores: model | transformers -------|--------- {model_name} | {scores[model_name][1]} The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` """ model_card_dir.mkdir(parents=True, exist_ok=True) path = os.path.join(model_card_dir, "README.md") print(f"Generating {path}") with open(path, "w", encoding="utf-8") as f: f.write(readme) # make sure we are under the root of the project repo_dir = Path(__file__).resolve().parent.parent.parent model_cards_dir = repo_dir / "model_cards" for model_name in ["wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big"]: model_card_dir = model_cards_dir / "allenai" / model_name write_model_card(model_card_dir, src_lang="de", tgt_lang="en", model_name=model_name)
4,456
28.130719
270
py
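The script above builds the whole README as one f-string, which is why every literal brace in the template (for example the BibTeX entry) is doubled as `{{` / `}}`, while single braces interpolate values such as `{model_name}` and `{scores[model_name][1]}`. A tiny standalone sketch of that escaping rule, using placeholder values borrowed from the script's `scores` dict:

```python
# Sketch: in an f-string, doubled braces emit literal braces, single braces interpolate.
model_name = "wmt19-de-en-6-6-big"  # placeholder taken from the scores dict above
score = 39.90                       # placeholder taken from the scores dict above

snippet = f"""
{model_name} | {score}

@misc{{kasai2020deep,
  title={{Deep Encoder, Shallow Decoder}},
}}
"""
print(snippet)
# prints "wmt19-de-en-6-6-big | 39.9" followed by a literal @misc{kasai2020deep, ...} block
```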
transformers
transformers-main/scripts/fsmt/fsmt-make-tiny-model.py
#!/usr/bin/env python
# coding: utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want one 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated.
#
# It will then be used as "stas/tiny-wmt19-en-de"

# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)

# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(dict(
    d_model=4,
    encoder_layers=1, decoder_layers=1,
    encoder_ffn_dim=4, decoder_ffn_dim=4,
    encoder_attention_heads=1, decoder_attention_heads=1))

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
2,179
35.333333
114
py
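As the comments in the script above note, the resulting checkpoint is meant for machinery-only tests under the id "stas/tiny-wmt19-en-de". A sketch of how such a tiny checkpoint might be consumed in a test follows; the test body is an illustrative assumption rather than an excerpt from the transformers test suite, only the hub id and the "Making tiny model" input come from the script.

```python
# Sketch: a smoke test against the tiny FSMT checkpoint produced by the script above.
# We only check that the plumbing runs, not that the translation is any good.
import unittest

from transformers import FSMTForConditionalGeneration, FSMTTokenizer


class TinyFSMTSmokeTest(unittest.TestCase):
    def test_generate_runs(self):
        mname = "stas/tiny-wmt19-en-de"  # id mentioned in the script's comments
        tokenizer = FSMTTokenizer.from_pretrained(mname)
        model = FSMTForConditionalGeneration.from_pretrained(mname)

        batch = tokenizer(["Making tiny model"], return_tensors="pt")
        out = model.generate(**batch, max_new_tokens=5)

        self.assertEqual(out.shape[0], 1)
        self.assertIsInstance(tokenizer.decode(out[0], skip_special_tokens=True), str)


if __name__ == "__main__":
    unittest.main()
```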
transformers
transformers-main/scripts/fsmt/fsmt-make-super-tiny-model.py
#!/usr/bin/env python
# coding: utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"

from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>",
    "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=['ru', 'en'],
    src_vocab_size=1000, tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1, decoder_layers=1,
    encoder_ffn_dim=4, decoder_ffn_dim=4,
    encoder_attention_heads=1, decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
3,263
36.090909
171
py
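After the script above has run, it writes the super-tiny checkpoint into a local `tiny-wmt19-en-ru` directory. A sketch of a follow-up sanity check that the saved tokenizer and model reload and work together; the reload step itself is an illustrative assumption, only the directory name and the "Making tiny model" input come from the script.

```python
# Sketch: reload the super-tiny checkpoint saved by the script above and run a
# forward pass, to confirm the custom vocab/merges files round-trip correctly.
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname_tiny = "tiny-wmt19-en-ru"  # local directory written by the script above
tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)

batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = model(**batch)
print("reloaded test output:", outputs.logits.shape)
```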
transformers
transformers-main/scripts/fsmt/gen-card-allenai-wmt16.py
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def write_model_card(model_card_dir, src_lang, tgt_lang, model_name): texts = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] scores = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } pair = f"{src_lang}-{tgt_lang}" readme = f""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. 
The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` """ model_card_dir.mkdir(parents=True, exist_ok=True) path = os.path.join(model_card_dir, "README.md") print(f"Generating {path}") with open(path, "w", encoding="utf-8") as f: f.write(readme) # make sure we are under the root of the project repo_dir = Path(__file__).resolve().parent.parent.parent model_cards_dir = repo_dir / "model_cards" for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: model_card_dir = model_cards_dir / "allenai" / model_name write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
4,874
30.25
270
py
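Each README generated by the script above starts with a YAML front matter block (`language`, `tags`, `license`, `datasets`, `metrics`) that the Hub parses as model metadata. A small sketch of how one might sanity-check that block after generation; the use of PyYAML and the hard-coded card path are assumptions for illustration, while the asserted values mirror the wmt16 template above.

```python
# Sketch: parse the YAML front matter of a generated model card and verify a few
# fields against the template. Requires PyYAML; run from the repo root after the
# gen-card script has produced the model_cards/ tree.
from pathlib import Path

import yaml

card = Path("model_cards/allenai/wmt16-en-de-dist-12-1/README.md").read_text(encoding="utf-8")

# the front matter sits between the first two '---' delimiters
_, front_matter, _ = card.split("---", 2)
meta = yaml.safe_load(front_matter)

assert meta["license"] == "apache-2.0"
assert "translation" in meta["tags"] and "wmt16" in meta["tags"]
assert meta["metrics"] == ["bleu"]
print(meta)
```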
transformers
transformers-main/scripts/benchmark/trainer-benchmark.py
#!/usr/bin/env python # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers nan = float("nan") class Tee: """ A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename) """ def __init__(self, filename): self.stdout = sys.stdout self.file = open(filename, "a") def __getattr__(self, attr): return getattr(self.stdout, attr) def write(self, msg): self.stdout.write(msg) # strip tqdm codes self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M)) def get_original_command(max_width=80, full_python_path=False): """ Return the original command line string that can be replayed nicely and wrapped for 80 char width. Args: max_width (`int`, `optional`, defaults to 80): The width to wrap for. full_python_path (`bool`, `optional`, defaults to `False`): Whether to replicate the full path or just the last segment (i.e. `python`). """ cmd = [] # deal with critical env vars env_keys = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: val = os.environ.get(key, None) if val is not None: cmd.append(f"{key}={val}") # python executable (not always needed if the script is executable) python = sys.executable if full_python_path else sys.executable.split("/")[-1] cmd.append(python) # now the normal args cmd += list(map(shlex.quote, sys.argv)) # split up into up to MAX_WIDTH lines with shell multi-line escapes lines = [] current_line = "" while len(cmd) > 0: current_line += f"{cmd.pop(0)} " if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1: lines.append(current_line) current_line = "" return "\\\n".join(lines) def get_base_command(args, output_dir): # unwrap multi-line input args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd) # remove --output_dir if any and set our own args.base_cmd = re.sub("--output_dir\s+[^\s]+", "", args.base_cmd) args.base_cmd += f" --output_dir {output_dir}" # ensure we have --overwrite_output_dir args.base_cmd = re.sub("--overwrite_output_dir\s+", "", args.base_cmd) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd) def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0) return dict( {k: random.uniform(0, 100) for k in metric_keys}, **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])}, ) result = subprocess.run(cmd, capture_output=True, text=True) if verbose: print("STDOUT", result.stdout) print("STDERR", result.stderr) # save the streams prefix = variation.replace(" ", "-") with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f: f.write(result.stdout) with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f: f.write(result.stderr) if result.returncode != 0: if verbose: print("failed") return {target_metric_key: nan} with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f: metrics = json.load(f) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def process_run( id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose, ): results = [] metrics = [] preamble = f"{id}: {variation:<{longest_variation_len}}" outcome = f"{preamble}: " metric_keys = set(report_metric_keys + [target_metric_key]) for i in tqdm(range(repeat_times), desc=preamble, leave=False): single_run_metrics = process_run_single( id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose ) result = single_run_metrics[target_metric_key] if not math.isnan(result): metrics.append(single_run_metrics) results.append(result) outcome += "✓" else: outcome += "✘" outcome = f"\33[2K\r{outcome}" if len(metrics) > 0: mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()} mean_target = round(mean_metrics[target_metric_key], 2) results_str = f"{outcome} {mean_target}" if len(metrics) > 1: results_str += f" {tuple(round(x, 2) for x in results)}" print(results_str) mean_metrics[variation_key] = variation return mean_metrics else: print(outcome) return {variation_key: variation, target_metric_key: nan} def get_versions(): properties = torch.cuda.get_device_properties(torch.device("cuda")) return f""" Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB """ def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir): df = pd.DataFrame(results) variation_key = "variation" diff_key = "diff_%" sentinel_value = nan if base_variation is not None and len(df[df[variation_key] == base_variation]): # this may still return nan sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(sentinel_value): # as a fallback, use the minimal value as the sentinel sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(sentinel_value): df[diff_key] = df.apply( lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value) if not math.isnan(r[target_metric_key]) else 0, axis="columns", ) # re-order columns cols = [variation_key, target_metric_key, diff_key, *report_metric_keys] df = df.reindex(cols, axis="columns") # reorder cols # capitalize df = df.rename(str.capitalize, axis="columns") # make the cols as narrow 
as possible df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns") df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns") report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")] print("\n\n".join(report)) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--base-cmd", default=None, type=str, required=True, help="Base cmd", ) parser.add_argument( "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'", ) parser.add_argument( "--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against", ) parser.add_argument( "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second", ) parser.add_argument( "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples", ) parser.add_argument( "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported", ) parser.add_argument( "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked", ) parser.add_argument( "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress", ) args = parser.parse_args() output_dir = args.output_dir Path(output_dir).mkdir(exist_ok=True) base_cmd = get_base_command(args, output_dir) # split each dimension into its --foo variations dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty variations = list(map(str.strip, map(" ".join, itertools.product(*dims)))) longest_variation_len = max(len(x) for x in variations) # split wanted keys report_metric_keys = args.report_metric_keys.split() # capture prints into a log file for convenience report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt" print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt") print(f"and this script's output is also piped into {report_fn}") sys.stdout = Tee(report_fn) print(f"\n*** Running {len(variations)} benchmarks:") print(f"Base command: {' '.join(base_cmd)}") variation_key = "variation" results = [] for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)): cmd = base_cmd + variation.split() results.append( process_run( id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose, ) ) process_results(results, 
args.target_metric_key, report_metric_keys, args.base_variation, output_dir) if __name__ == "__main__": main()
15,565
33.668151
189
py
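The core of the benchmarking tool above is the expansion of the `--variations` dimensions into a cartesian product of command-line suffixes, exactly as its header comments describe. A standalone sketch of that expansion, mirroring the `re.split` / `itertools.product` lines from the tool's `main()` and using the example dimensions from its comments:

```python
# Sketch: expand '|'-separated variation dimensions into the full set of command
# variants, the same way trainer-benchmark.py does before launching each run.
import itertools
import re

variations_args = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]  # example from the header comments

dims = [list(map(str.strip, re.split(r"\|", x))) for x in variations_args]
variants = list(map(str.strip, map(" ".join, itertools.product(*dims))))

for i, v in enumerate(variants, 1):
    print(f"{i}. {v}")
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
```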
transformers
transformers-main/tests/test_modeling_utils.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import json import os import os.path import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from pytest import mark from requests.exceptions import HTTPError from transformers import ( AutoConfig, AutoModel, PretrainedConfig, is_torch_available, logging, ) from transformers.testing_utils import ( TOKEN, USER, CaptureLogger, TestCasePlus, is_staging_test, require_accelerate, require_safetensors, require_torch, require_torch_gpu, require_torch_multi_gpu, require_usr_bin_time, slow, ) from transformers.utils import ( SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig, NoSuperInitConfig # noqa E402 if is_torch_available(): import torch from test_module.custom_modeling import CustomModel, NoSuperInitModel from torch import nn from transformers import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, AutoModelForCausalLM, AutoTokenizer, BertConfig, BertModel, CLIPTextModel, PreTrainedModel, T5Config, T5ForConditionalGeneration, ) from transformers.modeling_utils import shard_checkpoint # Fake pretrained models for tests class BaseModel(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def __init__(self, config): super().__init__(config) self.linear = nn.Linear(5, 5) self.linear_2 = nn.Linear(5, 5) def forward(self, x): return self.linear_2(self.linear(x)) class BaseModelWithTiedWeights(PreTrainedModel): config_class = PretrainedConfig def __init__(self, config): super().__init__(config) self.linear = nn.Linear(5, 5) self.linear_2 = nn.Linear(5, 5) def forward(self, x): return self.linear_2(self.linear(x)) def tie_weights(self): self.linear_2.weight = self.linear.weight class ModelWithHead(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def _init_weights(self, module): pass def __init__(self, config): super().__init__(config) self.base = BaseModel(config) # linear is a common name between Base and Head on purpose. 
self.linear = nn.Linear(5, 5) self.linear2 = nn.Linear(5, 5) def forward(self, x): return self.linear2(self.linear(self.base(x))) class ModelWithHeadAndTiedWeights(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def _init_weights(self, module): pass def __init__(self, config): super().__init__(config) self.base = BaseModel(config) self.decoder = nn.Linear(5, 5) def forward(self, x): return self.decoder(self.base(x)) def tie_weights(self): self.decoder.weight = self.base.linear.weight TINY_T5 = "patrickvonplaten/t5-tiny-random" TINY_BERT_FOR_TOKEN_CLASSIFICATION = "hf-internal-testing/tiny-bert-for-token-classification" def check_models_equal(model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): if model1_p.data.ne(model2_p.data).sum() > 0: models_are_equal = False return models_are_equal @require_torch class ModelUtilsTest(TestCasePlus): @slow def test_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = BertConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, PretrainedConfig) model = BertModel.from_pretrained(model_name) model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, PreTrainedModel) self.assertEqual(len(loading_info["missing_keys"]), 0) self.assertEqual(len(loading_info["unexpected_keys"]), 8) self.assertEqual(len(loading_info["mismatched_keys"]), 0) self.assertEqual(len(loading_info["error_msgs"]), 0) config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) # Not sure this is the intended behavior. TODO fix Lysandre & Thom config.name_or_path = model_name model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) self.assertEqual(model.config.output_hidden_states, True) self.assertEqual(model.config, config) def test_model_from_pretrained_subfolder(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") model = BertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder)) with self.assertRaises(OSError): _ = BertModel.from_pretrained(tmp_dir) model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_subfolder_sharded(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") model = BertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB") with self.assertRaises(OSError): _ = BertModel.from_pretrained(tmp_dir) model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_hub_subfolder(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(OSError): _ = BertModel.from_pretrained(model_id) model = BertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_hub_subfolder_sharded(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(OSError): _ = BertModel.from_pretrained(model_id) model = BertModel.from_pretrained(model_id, subfolder=subfolder) 
self.assertIsNotNone(model) def test_model_from_pretrained_with_different_pretrained_model_name(self): model = T5ForConditionalGeneration.from_pretrained(TINY_T5) self.assertIsNotNone(model) logger = logging.get_logger("transformers.configuration_utils") with CaptureLogger(logger) as cl: BertModel.from_pretrained(TINY_T5) self.assertTrue("You are using a model of type t5 to instantiate a model of type bert" in cl.out) def test_model_from_config_torch_dtype(self): # test that the model can be instantiated with dtype of user's choice - as long as it's a # float dtype. To make it happen config.torch_dtype needs to be set before instantiating the # model from the config object. config = T5Config.from_pretrained(TINY_T5) model = AutoModel.from_config(config) # XXX: isn't supported # model = T5ForConditionalGeneration.from_config(config) self.assertEqual(model.dtype, torch.float32) model = AutoModel.from_config(config, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # torch.set_default_dtype() supports only float dtypes, so will fail with non-float type with self.assertRaises(ValueError): model = AutoModel.from_config(config, torch_dtype=torch.int64) def test_model_from_pretrained_torch_dtype(self): # test that the model can be instantiated with dtype of either # 1. explicit from_pretrained's torch_dtype argument # 2. via autodiscovery by looking at model weights (torch_dtype="auto") # so if a model.half() was saved, we want it to be instantiated as such. # # test an explicit model class, but also AutoModel separately as the latter goes through a different code path model_path = self.get_auto_remove_tmp_dir() # baseline - we know TINY_T5 is fp32 model model = T5ForConditionalGeneration.from_pretrained(TINY_T5) self.assertEqual(model.dtype, torch.float32) def remove_torch_dtype(model_path): file = f"{model_path}/config.json" with open(file, "r", encoding="utf-8") as f: s = json.load(f) s.pop("torch_dtype") with open(file, "w", encoding="utf-8") as f: json.dump(s, f) # test the default fp32 save_pretrained => from_pretrained cycle model.save_pretrained(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path) self.assertEqual(model.dtype, torch.float32) # 1. test torch_dtype="auto" via `config.torch_dtype` model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) # 2. test torch_dtype="auto" via auto-derivation # now remove the torch_dtype entry from config.json and try "auto" again which should # perform auto-derivation from weights remove_torch_dtype(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) # test forced loading in fp16 (even though the weights are in fp32) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test fp16 save_pretrained, loaded with auto-detection model = model.half() model.save_pretrained(model_path) # 1. test torch_dtype="auto" via `config.torch_dtype` model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.config.torch_dtype, torch.float16) self.assertEqual(model.dtype, torch.float16) # tests `config.torch_dtype` saving with open(f"{model_path}/config.json") as f: config_dict = json.load(f) self.assertEqual(config_dict["torch_dtype"], "float16") # 2. 
test torch_dtype="auto" via auto-derivation # now same with using config info remove_torch_dtype(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float16) # 3. now retest that AutoModel behaves the same wrt torch_dtype="auto" as T5ForConditionalGeneration model = AutoModel.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float16) # test fp16 save_pretrained, loaded with the explicit fp16 model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test AutoModel separately as it goes through a different path # test auto-detection - as currently TINY_T5 doesn't have torch_dtype entry model = AutoModel.from_pretrained(TINY_T5, torch_dtype="auto") # test that the config object didn't get polluted with torch_dtype="auto" # there was a bug that after this call we ended up with config.torch_dtype=="auto" self.assertNotEqual(model.config.torch_dtype, "auto") # now test the outcome self.assertEqual(model.dtype, torch.float32) model = AutoModel.from_pretrained(TINY_T5, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test model whose first param is not of a floating type, but int model = AutoModel.from_pretrained(TINY_BERT_FOR_TOKEN_CLASSIFICATION, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) def test_no_super_init_config_and_model(self): config = NoSuperInitConfig(attribute=32) model = NoSuperInitModel(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = NoSuperInitModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_shard_checkpoint(self): # This is the model we will use, total size 340,000 bytes. model = torch.nn.Sequential( torch.nn.Linear(100, 200, bias=False), # size 80,000 torch.nn.Linear(200, 200, bias=False), # size 160,000 torch.nn.Linear(200, 100, bias=False), # size 80,000 torch.nn.Linear(100, 50, bias=False), # size 20,000 ) state_dict = model.state_dict() with self.subTest("No shard when max size is bigger than model size"): shards, index = shard_checkpoint(state_dict) self.assertIsNone(index) self.assertDictEqual(shards, {WEIGHTS_NAME: state_dict}) with self.subTest("Test sharding, no weights bigger than max size"): shards, index = shard_checkpoint(state_dict, max_shard_size="300kB") # Split is first two layers then last two. self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "0.weight": "pytorch_model-00001-of-00002.bin", "1.weight": "pytorch_model-00001-of-00002.bin", "2.weight": "pytorch_model-00002-of-00002.bin", "3.weight": "pytorch_model-00002-of-00002.bin", }, }, ) shard1 = {"0.weight": state_dict["0.weight"], "1.weight": state_dict["1.weight"]} shard2 = {"2.weight": state_dict["2.weight"], "3.weight": state_dict["3.weight"]} self.assertDictEqual( shards, {"pytorch_model-00001-of-00002.bin": shard1, "pytorch_model-00002-of-00002.bin": shard2} ) with self.subTest("Test sharding with weights bigger than max size"): shards, index = shard_checkpoint(state_dict, max_shard_size="100kB") # Split is first layer, second layer then last 2. 
self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "0.weight": "pytorch_model-00001-of-00003.bin", "1.weight": "pytorch_model-00002-of-00003.bin", "2.weight": "pytorch_model-00003-of-00003.bin", "3.weight": "pytorch_model-00003-of-00003.bin", }, }, ) shard1 = {"0.weight": state_dict["0.weight"]} shard2 = {"1.weight": state_dict["1.weight"]} shard3 = {"2.weight": state_dict["2.weight"], "3.weight": state_dict["3.weight"]} self.assertDictEqual( shards, { "pytorch_model-00001-of-00003.bin": shard1, "pytorch_model-00002-of-00003.bin": shard2, "pytorch_model-00003-of-00003.bin": shard3, }, ) def test_checkpoint_sharding_local(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. for max_size in ["50kB", "50kiB", "100kB", "100kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".bin"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: state_dict = torch.load(shard_file) self.assertEqual(len(state_dict), 1) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".bin")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_sharding_from_hub(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") # the model above is the same as the model below, just a sharded version. 
ref_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.parameters(), ref_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_variant_local(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2") weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) weights_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_variant_local_sharded(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB") weights_index_name = ".".join(WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) self.assertTrue(os.path.isfile(weights_index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) for i in range(1, 5): weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["bin"]) weights_name_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_name_file)) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_checkpoint_variant_local_safe(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", safe_serialization=True) weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["safetensors"]) weights_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_checkpoint_variant_local_sharded_safe(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=True) weights_index_name = ".".join(SAFE_WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) self.assertTrue(os.path.isfile(weights_index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) for i in range(1, 5): weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["safetensors"]) weights_name_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_name_file)) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, 
variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_variant_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) def test_checkpoint_variant_hub_sharded(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir ) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) @require_safetensors def test_checkpoint_variant_hub_safe(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) @require_safetensors def test_checkpoint_variant_hub_sharded_safe(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir ) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) def test_checkpoint_variant_save_load(self): with tempfile.TemporaryDirectory() as tmp_dir: model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" ) weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) model.save_pretrained(tmp_dir, variant="v2") # saving will create a variant checkpoint self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) model.save_pretrained(tmp_dir) # saving shouldn't delete variant checkpoints weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) # there should be a normal checkpoint self.assertTrue(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) self.assertIsNotNone(model) @require_accelerate @mark.accelerate_tests def test_from_pretrained_low_cpu_mem_usage_functional(self): # test that we can use `from_pretrained(..., low_cpu_mem_usage=True)` with normal and # sharded models mnames = [ "hf-internal-testing/tiny-random-bert-sharded", "hf-internal-testing/tiny-random-bert", ] for mname in mnames: _ = BertModel.from_pretrained(mname, low_cpu_mem_usage=True) @require_usr_bin_time @require_accelerate @mark.accelerate_tests def test_from_pretrained_low_cpu_mem_usage_measured(self): # test that `from_pretrained(..., low_cpu_mem_usage=True)` uses less cpu memory than default mname = "bert-base-cased" preamble = "from transformers import AutoModel" one_liner_str = f'{preamble}; AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=False)' max_rss_normal = self.python_one_liner_max_rss(one_liner_str) # print(f"{max_rss_normal=}") one_liner_str = f'{preamble}; AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=True)' max_rss_low_mem = self.python_one_liner_max_rss(one_liner_str) # 
print(f"{max_rss_low_mem=}") diff_bytes = max_rss_normal - max_rss_low_mem diff_percent = diff_bytes / max_rss_low_mem # print(f"{diff_bytes=}, {diff_percent=}") # ideally we would compare that the diff is close to ~1x checkpoint size in bytes, but # measuring cpu memory on linux is very tricky and inconsistent, so instead let's check that # it's at least 15% less cpu memory consumed self.assertGreater( diff_percent, 0.15, "should use less CPU memory for low_cpu_mem_usage=True, " f"but got max_rss_normal={max_rss_normal} and max_rss_low_mem={max_rss_low_mem}", ) # if you want to compare things manually, let's first look at the size of the model in bytes # model = BertModel.from_pretrained(mname, low_cpu_mem_usage=False) # total_numel = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) # total_bytes = total_numel * 4 # 420MB # Now the diff_bytes should be very close to total_bytes, but the reports are inconsistent. # The easiest way to test this is to switch the model and torch.load to do all the work on # gpu - that way one can measure exactly the total and peak memory used. Perhaps once we add # functionality to load models directly on gpu, this test can be rewritten to use torch's # cuda memory tracking and then we should be able to do a much more precise test. @require_accelerate @mark.accelerate_tests @require_torch_multi_gpu @slow def test_model_parallelism_gpt2(self): device_map = {"transformer.wte": 0, "transformer.wpe": 0, "lm_head": 0, "transformer.ln_f": 1} for i in range(12): device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1 model = AutoModelForCausalLM.from_pretrained("gpt2", device_map=device_map) tokenizer = AutoTokenizer.from_pretrained("gpt2") inputs = tokenizer("Hello, my name is", return_tensors="pt") output = model.generate(inputs["input_ids"].to(0)) text_output = tokenizer.decode(output[0].tolist()) self.assertEqual(text_output, "Hello, my name is John. I'm a writer, and I'm a writer. 
I'm") @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_from_pretrained_disk_offload_task_model(self): model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2") device_map = { "transformer.wte": 0, "transformer.wpe": 0, "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": "disk", "transformer.h.4": "disk", "transformer.ln_f": 0, "lm_head": 0, } with tempfile.TemporaryDirectory() as tmp_dir: inputs = torch.tensor([[1, 2, 3]]).to(0) model.save_pretrained(tmp_dir) new_model = AutoModelForCausalLM.from_pretrained(tmp_dir).to(0) outputs1 = new_model.to(0)(inputs) offload_folder = os.path.join(tmp_dir, "offload") new_model_with_offload = AutoModelForCausalLM.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder ) outputs2 = new_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu())) # With state dict temp offload offload_folder = os.path.join(tmp_dir, "offload") new_model_with_offload = AutoModelForCausalLM.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder, offload_state_dict=True, ) outputs2 = new_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu())) def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. 
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() def test_load_from_one_file(self): try: tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", f ) config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") _ = BertModel.from_pretrained(tmp_file, config=config) finally: os.remove(tmp_file) def test_legacy_load_from_url(self): # This test is for deprecated behavior and can be removed in v5 config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") _ = BertModel.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", config=config ) @require_safetensors def test_use_safetensors(self): # test nice error message if no safetensor files available with self.assertRaises(OSError) as env_error: AutoModel.from_pretrained("hf-internal-testing/tiny-random-RobertaModel", use_safetensors=True) self.assertTrue( "model.safetensors or model.safetensors.index.json and thus cannot be loaded with `safetensors`" in str(env_error.exception) ) # test that error if only safetensors is available with self.assertRaises(OSError) as env_error: BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors", use_safetensors=False) self.assertTrue("does not appear to have a file named pytorch_model.bin" in str(env_error.exception)) # test that only safetensors if both available and use_safetensors=False with tempfile.TemporaryDirectory() as tmp_dir: CLIPTextModel.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", subfolder="text_encoder", use_safetensors=False, cache_dir=tmp_dir, ) all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*")) self.assertTrue(any(f.endswith("bin") for f in all_downloaded_files)) self.assertFalse(any(f.endswith("safetensors") for f in all_downloaded_files)) # test that no safetensors if both available and use_safetensors=True with tempfile.TemporaryDirectory() as tmp_dir: CLIPTextModel.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", subfolder="text_encoder", use_safetensors=True, cache_dir=tmp_dir, ) all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*")) self.assertTrue(any(f.endswith("safetensors") for f in all_downloaded_files)) self.assertFalse(any(f.endswith("bin") for f in all_downloaded_files)) @require_safetensors def test_safetensors_save_and_load(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) # No pytorch_model.bin file, only a model.safetensors self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) new_model = BertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_safetensors_load_from_hub(self): safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors") pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Check models are equal for p1, p2 in 
zip(safetensors_model.parameters(), pytorch_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_safetensors_save_and_load_sharded(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB") # No pytorch_model.bin index file, only a model.safetensors index self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) # No regular weights file self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) new_model = BertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_safetensors_load_from_hub_sharded(self): safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded-safetensors") pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") # Check models are equal for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_base_model_to_head_model_load(self): base_model = BaseModel(PretrainedConfig()) with tempfile.TemporaryDirectory() as tmp_dir: base_model.save_pretrained(tmp_dir) # Can load a base model in a model with head model = ModelWithHead.from_pretrained(tmp_dir) for p1, p2 in zip(model.base.parameters(), base_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) # It doesn't work if the state dict has a mix of keys of the head and base without prefix though. base_state_dict = base_model.state_dict() head_state_dict = model.state_dict() base_state_dict["linear2.weight"] = head_state_dict["linear2.weight"] base_state_dict["linear2.bias"] = head_state_dict["linear2.bias"] torch.save(base_state_dict, os.path.join(tmp_dir, WEIGHTS_NAME)) with self.assertRaisesRegex( ValueError, "The state dictionary of the model you are trying to load is corrupted." 
): _ = ModelWithHead.from_pretrained(tmp_dir) def test_tied_weights_reload(self): # Base model = BaseModelWithTiedWeights(PretrainedConfig()) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = BaseModelWithTiedWeights.from_pretrained(tmp_dir) self.assertIs(new_model.linear.weight, new_model.linear_2.weight) state_dict = model.state_dict() # Remove tied weight from state_dict -> model should load with no complain of missing keys del state_dict["linear_2.weight"] torch.save(state_dict, os.path.join(tmp_dir, WEIGHTS_NAME)) new_model, load_info = BaseModelWithTiedWeights.from_pretrained(tmp_dir, output_loading_info=True) self.assertListEqual(load_info["missing_keys"], []) self.assertIs(new_model.linear.weight, new_model.linear_2.weight) # With head model.save_pretrained(tmp_dir) new_model, load_info = ModelWithHeadAndTiedWeights.from_pretrained(tmp_dir, output_loading_info=True) self.assertIs(new_model.base.linear.weight, new_model.decoder.weight) # Should only complain about the missing bias self.assertListEqual(load_info["missing_keys"], ["decoder.bias"]) def test_unexpected_keys_warnings(self): model = ModelWithHead(PretrainedConfig()) logger = logging.get_logger("transformers.modeling_utils") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # Loading the model with a new class, we don't get a warning for unexpected weights, just an info with CaptureLogger(logger) as cl: _, loading_info = BaseModel.from_pretrained(tmp_dir, output_loading_info=True) self.assertNotIn("were not used when initializing ModelWithHead", cl.out) self.assertEqual( set(loading_info["unexpected_keys"]), {"linear.weight", "linear.bias", "linear2.weight", "linear2.bias"}, ) # Loading the model with the same class, we do get a warning for unexpected weights state_dict = model.state_dict() state_dict["added_key"] = state_dict["linear.weight"] torch.save(state_dict, os.path.join(tmp_dir, WEIGHTS_NAME)) with CaptureLogger(logger) as cl: _, loading_info = ModelWithHead.from_pretrained(tmp_dir, output_loading_info=True) self.assertIn("were not used when initializing ModelWithHead: ['added_key']", cl.out) self.assertEqual(loading_info["unexpected_keys"], ["added_key"]) def test_warn_if_padding_and_no_attention_mask(self): logger = logging.get_logger("transformers.modeling_utils") with self.subTest("Ensure no warnings when pad_token_id is None."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config_no_pad_token = PretrainedConfig() config_no_pad_token.pad_token_id = None model = ModelWithHead(config_no_pad_token) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure no warnings when there is an attention_mask."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure no warnings when there are no pad_token_ids in the input_ids."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config = 
PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[1, 345, 232, 328, 740, 140, 1695, 69, 6078, 2341, 25]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure a warning is shown when the input_ids start with a pad_token_id."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure a warning is shown when the input_ids end with a pad_token_id."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[432, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure that the warning is shown at most once."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertEqual(cl.out.count("We strongly recommend passing in an `attention_mask`"), 1) with self.subTest("Ensure a different warning is shown when the pad_token_id is equal to the bos_token_id."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 config.bos_token_id = config.pad_token_id model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("You may ignore this warning if your `pad_token_id`", cl.out) @require_torch_gpu @slow def test_pretrained_low_mem_new_config(self): # Checking for 1 model(the same one which was described in the issue) . 
model_ids = ["gpt2"] for model_id in model_ids: model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path=model_id) model_config.n_layer = 48 model_config.n_head = 25 model_config.n_embd = 1600 model = AutoModelForCausalLM.from_pretrained( pretrained_model_name_or_path=model_id, config=model_config, ignore_mismatched_sizes=True, torch_dtype=torch.float16, low_cpu_mem_usage=True, ) model_ref = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_id) self.assertEqual(model.__class__.__name__, model_ref.__class__.__name__) @require_torch @is_staging_test class ModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-model") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-model-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-model") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) model.push_to_hub("test-model", use_auth_token=self._token) new_model = BertModel.from_pretrained(f"{USER}/test-model") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=self._token, repo_id="test-model") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id="test-model", push_to_hub=True, use_auth_token=self._token) new_model = BertModel.from_pretrained(f"{USER}/test-model") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) model.push_to_hub("valid_org/test-model-org", use_auth_token=self._token) new_model = BertModel.from_pretrained("valid_org/test-model-org") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-model-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( tmp_dir, push_to_hub=True, use_auth_token=self._token, repo_id="valid_org/test-model-org" ) new_model = BertModel.from_pretrained("valid_org/test-model-org") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_push_to_hub_dynamic_model(self): CustomConfig.register_for_auto_class() CustomModel.register_for_auto_class() config = CustomConfig(hidden_size=32) model = CustomModel(config) model.push_to_hub("test-dynamic-model", use_auth_token=self._token) # checks self.assertDictEqual( config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig", "AutoModel": "custom_modeling.CustomModel"}, ) new_model = AutoModel.from_pretrained(f"{USER}/test-dynamic-model", trust_remote_code=True) # Can't make an isinstance check because the new_model is from the CustomModel class of a dynamic module self.assertEqual(new_model.__class__.__name__, "CustomModel") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-model", trust_remote_code=True) new_model = 
AutoModel.from_config(config, trust_remote_code=True) self.assertEqual(new_model.__class__.__name__, "CustomModel")
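A minimal, self-contained sketch of the sharded-safetensors round trip exercised by `test_safetensors_save_and_load_sharded` above, using the same tiny checkpoint and shard size as the test; the exact number of shards produced depends on the model size.

import os
import tempfile

import torch
from transformers import BertModel

model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")

with tempfile.TemporaryDirectory() as tmp_dir:
    # A small max_shard_size splits the checkpoint into several safetensors shards
    # plus a model.safetensors.index.json index file.
    model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB")
    assert os.path.isfile(os.path.join(tmp_dir, "model.safetensors.index.json"))

    # from_pretrained transparently reassembles the shards.
    reloaded = BertModel.from_pretrained(tmp_dir)

for p1, p2 in zip(model.parameters(), reloaded.parameters()):
    assert torch.allclose(p1, p2)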
51,504
44.418871
119
py
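A companion sketch for the push-to-hub tests in the record above, showing the same flow outside the test harness. It assumes you are authenticated against the Hugging Face Hub (for example via `huggingface-cli login`); the repository name and username below are placeholders.

from transformers import BertConfig, BertModel

config = BertConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = BertModel(config)

# Creates (or updates) the "test-model" repository under your namespace and uploads the weights.
model.push_to_hub("test-model")

# The model can then be reloaded by repo id.
reloaded = BertModel.from_pretrained("your-username/test-model")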
transformers
transformers-main/tests/test_configuration_utils.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPT2Config from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 config_common_kwargs = { "return_dict": False, "output_hidden_states": True, "output_attentions": True, "torchscript": True, "torch_dtype": "float16", "use_bfloat16": True, "tf_legacy_loss": True, "pruned_heads": {"a": 1}, "tie_word_embeddings": False, "is_decoder": True, "cross_attention_hidden_size": 128, "add_cross_attention": True, "tie_encoder_decoder": True, "max_length": 50, "min_length": 3, "do_sample": True, "early_stopping": True, "num_beams": 3, "num_beam_groups": 3, "diversity_penalty": 0.5, "temperature": 2.0, "top_k": 10, "top_p": 0.7, "typical_p": 0.2, "repetition_penalty": 0.8, "length_penalty": 0.8, "no_repeat_ngram_size": 5, "encoder_no_repeat_ngram_size": 5, "bad_words_ids": [1, 2, 3], "num_return_sequences": 3, "chunk_size_feed_forward": 5, "output_scores": True, "return_dict_in_generate": True, "forced_bos_token_id": 2, "forced_eos_token_id": 3, "remove_invalid_values": True, "architectures": ["BertModel"], "finetuning_task": "translation", "id2label": {0: "label"}, "label2id": {"label": "0"}, "tokenizer_class": "BertTokenizerFast", "prefix": "prefix", "bos_token_id": 6, "pad_token_id": 7, "eos_token_id": 8, "sep_token_id": 9, "decoder_start_token_id": 10, "exponential_decay_length_penalty": (5, 1.01), "suppress_tokens": [0, 1], "begin_suppress_tokens": 2, "task_specific_params": {"translation": "some_params"}, "problem_type": "regression", } @is_staging_test class ConfigPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-config") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-config-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-config") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) config.push_to_hub("test-config", use_auth_token=self._token) new_config = BertConfig.from_pretrained(f"{USER}/test-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) # Reset repo delete_repo(token=self._token, repo_id="test-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, 
repo_id="test-config", push_to_hub=True, use_auth_token=self._token) new_config = BertConfig.from_pretrained(f"{USER}/test-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token) new_config = BertConfig.from_pretrained("valid_org/test-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token ) new_config = BertConfig.from_pretrained("valid_org/test-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_dynamic_config(self): CustomConfig.register_for_auto_class() config = CustomConfig(attribute=42) config.push_to_hub("test-dynamic-config", use_auth_token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"}) new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__, "CustomConfig") self.assertEqual(new_config.attribute, 42) class ConfigTestUtils(unittest.TestCase): def test_config_from_string(self): c = GPT2Config() # attempt to modify each of int/float/bool/str config records and verify they were updated n_embd = c.n_embd + 1 # int resid_pdrop = c.resid_pdrop + 1.0 # float scale_attn_weights = not c.scale_attn_weights # bool summary_type = c.summary_type + "foo" # str c.update_from_string( f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" ) self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd") self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop") self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights") self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type") def test_config_common_kwargs_is_complete(self): base_config = PretrainedConfig() missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. self.assertListEqual( missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] ) keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)] if len(keys_with_defaults) > 0: raise ValueError( "The following keys are set with the default values in" " `test_configuration_common.config_common_kwargs` pick another value for them:" f" {', '.join(keys_with_defaults)}." 
) def test_from_pretrained_subfolder(self): with self.assertRaises(OSError): # config is in subfolder, the following should not work without specifying the subfolder _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder") config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert") self.assertIsNotNone(config) def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") # This check ensures we did call the fake head request mock_head.assert_called() def test_legacy_load_from_url(self): # This test is for deprecated behavior and can be removed in v5 _ = BertConfig.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" ) def test_local_versioning(self): configuration = AutoConfig.from_pretrained("bert-base-cased") configuration.configuration_files = ["config.4.0.0.json"] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(tmp_dir) configuration.hidden_size = 2 json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w")) # This should pick the new configuration file as the version of Transformers is > 4.0.0 new_configuration = AutoConfig.from_pretrained(tmp_dir) self.assertEqual(new_configuration.hidden_size, 2) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 42.0.0 configuration.configuration_files = ["config.42.0.0.json"] configuration.hidden_size = 768 configuration.save_pretrained(tmp_dir) shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json")) new_configuration = AutoConfig.from_pretrained(tmp_dir) self.assertEqual(new_configuration.hidden_size, 768) def test_repo_versioning_before(self): # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. repo = "hf-internal-testing/test-two-configs" import transformers as new_transformers new_transformers.configuration_utils.__version__ = "v4.0.0" new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained( repo, return_unused_kwargs=True ) self.assertEqual(new_configuration.hidden_size, 2) # This checks `_configuration_file` is not kept in the kwargs by mistake. self.assertDictEqual(kwargs, {}) # Testing an older version by monkey-patching the version in the module where it's used. import transformers as old_transformers old_transformers.configuration_utils.__version__ = "v3.0.0" old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo) self.assertEqual(old_configuration.hidden_size, 768)
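A small self-contained sketch of the `update_from_string` behaviour covered by `test_config_from_string` above: each `key=value` pair is cast to the type of the attribute it overrides (int, float, bool or str).

from transformers import GPT2Config

config = GPT2Config()
config.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=False,summary_type=cls_index")

assert config.n_embd == 10                 # parsed as int
assert config.resid_pdrop == 0.2           # parsed as float
assert config.scale_attn_weights is False  # parsed as bool
assert config.summary_type == "cls_index"  # kept as str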
11,757
39.968641
124
py
transformers
transformers-main/tests/test_modeling_flax_common.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import json import random import tempfile from typing import List, Tuple import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import CaptureLogger, is_pt_flax_cross_test, require_flax, torch_device from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging from transformers.utils.generic import ModelOutput if is_flax_available(): import os import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict from transformers import ( FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, FLAX_MODEL_MAPPING, FlaxAutoModel, FlaxAutoModelForSequenceClassification, FlaxBertModel, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.modeling_flax_utils import FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def ids_tensor(shape, vocab_size, rng=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = np.array(values, dtype=jnp.int32).reshape(shape) return output def floats_tensor(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return np.array(values, dtype=jnp.float32).reshape(shape) def random_attention_mask(shape, rng=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng) # make sure that at least one token is attended to for each batch attn_mask[:, -1] = 1 return attn_mask def get_params(params, from_head_prefix=None): """Function extracts relevant parameters into flatten dict from model params, appends batch normalization statistics if present""" # If Both parameters and batch normalization statistics are present if "batch_stats" in params: # Extract only parameters for the specified head prefix (if specified) and add batch statistics if from_head_prefix is not None: extracted_params = flatten_dict(unfreeze(params["params"][from_head_prefix])) extracted_params.update(flatten_dict(params["batch_stats"][from_head_prefix])) else: extracted_params = flatten_dict(unfreeze(params["params"])) extracted_params.update(flatten_dict(params["batch_stats"])) # Only parameters are present else: if from_head_prefix is not None: extracted_params = 
flatten_dict(unfreeze(params[from_head_prefix])) else: extracted_params = flatten_dict(unfreeze(params)) return extracted_params @require_flax class FlaxModelTesterMixin: model_tester = None all_model_classes = () test_mismatched_shapes = True is_encoder_decoder = False test_head_masking = False has_attentions = True def _prepare_for_class(self, inputs_dict, model_class): inputs_dict = copy.deepcopy(inputs_dict) # hack for now until we have AutoModel classes if "ForMultipleChoice" in model_class.__name__: inputs_dict = { k: jnp.broadcast_to(v[:, None], (v.shape[0], self.model_tester.num_choices, v.shape[-1])) if isinstance(v, (jnp.ndarray, np.ndarray)) and k != "indices_prng_key" else v for k, v in inputs_dict.items() } return inputs_dict def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assert_almost_equals(jnp.nan_to_num(tuple_object), jnp.nan_to_num(dict_object), 1e-5) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) # (Copied from tests.test_modeling_common.ModelTesterMixin.check_pt_flax_outputs) def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """ Args: model_class: The class of the model that is currently testing. For example, ..., etc. Currently unused, but it could make debugging easier and faster. names: A string, or a list of strings. These specify what fx_outputs/pt_outputs represent in the model outputs. Currently unused, but in the future, we could use this information to make the error message clearer by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). 
if isinstance(fx_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is", ) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `name` attributes = tuple([f"{name}.{k}" for k in fx_keys]) self.check_pt_flax_outputs( fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(fx_outputs) in [tuple, list]: self.assertEqual( type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch" ) self.assertEqual( len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch" ) if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(fx_outputs), f"{name}: The tuple `attributes` should have the same length as `fx_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name` attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))]) for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes): self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(fx_outputs, jnp.ndarray): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `fx_outputs` is" ) # Using `np.asarray` gives `ValueError: assignment destination is read-only` at the line `fx_outputs[fx_nans] = 0`. fx_outputs = np.array(fx_outputs) pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(fx_outputs): fx_outputs = np.array([fx_outputs]) pt_outputs = np.array([pt_outputs]) fx_nans = np.isnan(fx_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[fx_nans] = 0 fx_outputs[fx_nans] = 0 pt_outputs[pt_nans] = 0 fx_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(fx_outputs - pt_outputs)) self.assertLessEqual( max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})." ) else: raise ValueError( "`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got" f" {type(fx_outputs)} instead." ) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): # It might be better to put this inside the for loop below (because we modify the config there). # But logically, it is fine. 
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_from_pretrained_save_pretrained(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs_dict).to_tuple() # verify that normal save_pretrained works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) # verify that save_pretrained for distributed training # with `params=params` works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=model.params) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = get_params(model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = 
FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = get_params(model.params) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with 
tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_ids, attention_mask=None, **kwargs): return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids", "attention_mask"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_naming_convention(self): for model_class in self.all_model_classes: model_class_name = model_class.__name__ module_class_name = ( model_class_name[:-5] + "Module" if model_class_name[-5:] == "Model" else model_class_name + "Module" ) bert_modeling_flax_module = __import__(model_class.__module__, fromlist=[module_class_name]) module_cls = getattr(bert_modeling_flax_module, module_class_name) self.assertIsNotNone(module_cls) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for 
model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # Question Answering model returns start_logits and end_logits if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) 
self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class not in get_values(FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(ValueError): new_model = FlaxAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(ValueError): new_model_without_prefix = FlaxAutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_flax_utils") with CaptureLogger(logger) as cl: new_model = FlaxAutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) logits = new_model(**inputs_dict)["logits"] self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = FlaxAutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) input_ids = ids_tensor((2, 8), 10) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_default_params_dtype(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # check if all params are still in float32 when dtype of computation is half-precision model = model_class(config, dtype=jnp.float16) types = jax.tree_util.tree_map(lambda x: x.dtype, model.params) types = flatten_dict(types) for name, type_ in types.items(): self.assertEquals(type_, jnp.float32, msg=f"param {name} is not initialized in fp32.") def test_to_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to bf16 params = model.to_bf16(model.params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) params = model.to_bf16(model.params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 except key for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_to_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to fp16 params = model.to_fp16(model.params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param 
{name} is not in fp16.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) params = model.to_fp16(model.params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 except key for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_to_fp32(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to fp16 and back to fp32 params = model.to_fp16(model.params) params = model.to_fp32(params) # test if all params are in fp32 types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) # cast to fp16 and back to fp32 with mask params = model.to_fp16(model.params) params = model.to_fp32(params, mask) # test if all params are in fp32 except key types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float16, msg=f"param {name} should be in fp16.") else: self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") def test_save_load_in_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # convert weights to fp16 and save params = model.to_fp16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_save_load_in_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # convert weights to bf16 and save params = model.to_bf16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "__call__")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_headmasking(self): if not self.test_head_masking: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict 
= True def _prepare_layer_head_mask(i, attention_heads, num_hidden_layers): if i == 0: return np.concatenate([np.zeros(1, dtype=jnp.int32), np.ones(attention_heads - 1, dtype=jnp.int32)]) if i == num_hidden_layers - 1: return np.concatenate([np.zeros(attention_heads - 1, dtype=jnp.int32), np.ones(1, dtype=jnp.int32)]) return np.ones(attention_heads, dtype=jnp.int32) for model_class in self.all_model_classes: model = model_class(config) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False inputs = self._prepare_for_class(inputs_dict, model_class).copy() # Prepare head mask inputs["head_mask"] = np.stack( [ _prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers) for i in range(config.num_hidden_layers) ] ) outputs = model(**inputs) def _check_attentions_validity(attentions): # Remove NaN for t in attentions: # Check we don't have more than 25% nans (arbitrary) self.assertLess(np.isnan(t).sum(), t.size / 4) attentions = [np.where(np.isnan(t), 0.0, t) for t in attentions] self.assertAlmostEqual(attentions[0][..., 0, :, :].sum(), 0.0) self.assertNotEqual(attentions[0][..., -1, :, :].sum(), 0.0) if len(attentions) > 2: # encoder-decodere models have only 2 layers in each modules self.assertNotEqual(attentions[1][..., 0, :, :].sum(), 0.0) self.assertAlmostEqual(attentions[-1][..., -2, :, :].sum(), 0.0) self.assertNotEqual(attentions[-1][..., -1, :, :].sum(), 0.0) if model.config.is_encoder_decoder: raise NotImplementedError("The test has not been implemented for encoder-decoder models yet.") else: _check_attentions_validity(outputs.attentions) def test_no_automatic_init(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: model = model_class(config, _do_init=False) # Check that accesing parmas raises an ValueError when _do_init is False with self.assertRaises(ValueError): params = model.params # Check if we params can be properly initialized when calling init_weights params = model.init_weights(model.key, model.input_shape) self.assertIsInstance(params, FrozenDict) # Check if all required parmas are initialized keys = set(flatten_dict(unfreeze(params)).keys()) self.assertTrue(all(k in keys for k in model.required_params)) # Check if the shapes match flat_params = flatten_dict(unfreeze(params)) for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items(): self.assertEqual( v.shape, flat_params[k].shape, "Shapes of {} do not match. Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape), ) # Check that setting params raises an ValueError when _do_init is False with self.assertRaises(ValueError): model.params = params # Check if we can do a forward pass inputs_dict["output_hidden_states"] = True inputs = self._prepare_for_class(inputs_dict, model_class).copy() model(**inputs, params=params) def test_from_pretrained_with_no_automatic_init(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True def _assert_all_params_initialised(model, params): # Check if all required parmas are loaded keys = set(flatten_dict(unfreeze(params)).keys()) self.assertTrue(all(k in keys for k in model.required_params)) # Check if the shapes match flat_params = flatten_dict(unfreeze(params)) for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items(): self.assertEqual( v.shape, flat_params[k].shape, "Shapes of {} do not match. 
Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape), ) for model_class in self.all_model_classes: # init the model model = model_class(config) # save the model in the temporary directory # load the saved model with _do_init=False with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model, params = model_class.from_pretrained(tmpdirname, _do_init=False) # Check that accessing params raises a ValueError when _do_init is False with self.assertRaises(ValueError): params = model.params # Check if all required params are loaded _assert_all_params_initialised(model, params) # Check that setting params raises a ValueError when _do_init is False with self.assertRaises(ValueError): model.params = params # Check if init_weights initializes missing keys from from_pretrained flat_params = flatten_dict(unfreeze(params)) random_key = random.choice(list(flat_params.keys())) flat_params.pop(random_key) params = freeze(unflatten_dict(flat_params)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) model, params = model_class.from_pretrained(tmpdirname, _do_init=False) params = model.init_weights(model.key, model.input_shape, params=params) # Check if all required params are loaded _assert_all_params_initialised(model, params) def test_checkpoint_sharding_from_hub(self): model = FlaxBertModel.from_pretrained("ArthurZ/flax-tiny-random-bert-sharded") # the model above is the same as the model below, just a sharded version. ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(ref_model.params).values()): assert np.allclose(np.array(p1), np.array(p2)) def test_checkpoint_sharding_local(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint.
for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".msgpack"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, FLAX_WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME))) # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: with open(shard_file, "rb") as state_f: state_file = from_bytes(FlaxBertModel, state_f.read()) self.assertEqual(len(state_file), 1) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".msgpack")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = FlaxBertModel.from_pretrained(tmp_dir) for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(new_model.params).values()): self.assertTrue(np.allclose(np.array(p1), np.array(p2))) @is_pt_flax_cross_test def test_from_sharded_pt(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True) ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-fx-only") for key, ref_val in flatten_dict(ref_model.params).items(): val = flatten_dict(model.params)[key] assert np.allclose(np.array(val), np.array(ref_val)) def test_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) remat_model = model_class(config) try: remat_model.enable_gradient_checkpointing() except NotImplementedError: continue outputs = model(**prepared_inputs_dict) remat_outputs = remat_model(**prepared_inputs_dict) # ensure that the dicts of outputs contain the same keys self.assertEqual(outputs.keys(), remat_outputs.keys()) outputs = outputs.to_tuple() remat_outputs = remat_outputs.to_tuple() # ensure that the outputs remain precisely equal for output, remat_output in zip(outputs, remat_outputs): self.assertTrue((output == remat_output).all())
53,493
45.597561
127
py
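test_checkpoint_sharding_local in the Flax model-test file above drives save_pretrained(max_shard_size=...) and compares the resulting .msgpack shards against the JSON index. A condensed sketch of that round trip outside the test harness; it assumes, as the test does, that the checkpoint is larger than the 200kB threshold so that an index file is actually written:

import json
import os
import tempfile

from transformers import FlaxBertModel
from transformers.utils import FLAX_WEIGHTS_INDEX_NAME

model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

with tempfile.TemporaryDirectory() as tmp_dir:
    # Weights above the threshold are split into several .msgpack shards plus a JSON index.
    model.save_pretrained(tmp_dir, max_shard_size="200kB")

    with open(os.path.join(tmp_dir, FLAX_WEIGHTS_INDEX_NAME), encoding="utf-8") as fp:
        index = json.load(fp)

    shards = {f for f in os.listdir(tmp_dir) if f.endswith(".msgpack")}
    assert set(index["weight_map"].values()) == shards

    # from_pretrained resolves the index transparently and stitches the shards back together.
    reloaded = FlaxBertModel.from_pretrained(tmp_dir)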
transformers
transformers-main/tests/test_configuration_common.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class ConfigTester(object): def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs): self.parent = parent self.config_class = config_class self.has_text_modality = has_text_modality self.inputs_dict = kwargs self.common_properties = common_properties def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) common_properties = ( ["hidden_size", "num_attention_heads", "num_hidden_layers"] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["vocab_size"]) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist") # Test that config has the common properties as setter for idx, name in enumerate(common_properties): try: setattr(config, name, idx) self.parent.assertEqual( getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) 
for idx, name in enumerate(common_properties): try: config = self.config_class(**{name: idx}) self.parent.assertEqual( getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def create_and_test_config_to_json_string(self): config = self.config_class(**self.inputs_dict) obj = json.loads(config.to_json_string()) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key], value) def create_and_test_config_to_json_file(self): config_first = self.config_class(**self.inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "config.json") config_first.to_json_file(json_file_path) config_second = self.config_class.from_json_file(json_file_path) self.parent.assertEqual(config_second.to_dict(), config_first.to_dict()) def create_and_test_config_from_and_save_pretrained(self): config_first = self.config_class(**self.inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(tmpdirname) config_second = self.config_class.from_pretrained(tmpdirname) self.parent.assertEqual(config_second.to_dict(), config_first.to_dict()) def create_and_test_config_from_and_save_pretrained_subfolder(self): config_first = self.config_class(**self.inputs_dict) subfolder = "test" with tempfile.TemporaryDirectory() as tmpdirname: sub_tmpdirname = os.path.join(tmpdirname, subfolder) config_first.save_pretrained(sub_tmpdirname) config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder) self.parent.assertEqual(config_second.to_dict(), config_first.to_dict()) def create_and_test_config_with_num_labels(self): config = self.config_class(**self.inputs_dict, num_labels=5) self.parent.assertEqual(len(config.id2label), 5) self.parent.assertEqual(len(config.label2id), 5) config.num_labels = 3 self.parent.assertEqual(len(config.id2label), 3) self.parent.assertEqual(len(config.label2id), 3) def check_config_can_be_init_without_params(self): if self.config_class.is_composition: return config = self.config_class() self.parent.assertIsNotNone(config) def check_config_arguments_init(self): kwargs = copy.deepcopy(config_common_kwargs) config = self.config_class(**kwargs) wrong_values = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.float16: wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16)) elif getattr(config, key) != value: wrong_values.append((key, getattr(config, key), value)) if len(wrong_values) > 0: errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values]) raise ValueError(f"The following keys were not properly set in the config:\n{errors}") def run_common_tests(self): self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
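ConfigTester above is designed to be driven from a model's own unittest case, which passes itself as parent together with the config class and any config kwargs. A minimal sketch of that usage; BertConfig and hidden_size=37 are illustrative choices, and the relative import assumes the module sits inside the same tests package:

import unittest

from transformers import BertConfig

from .test_configuration_common import ConfigTester


class BertConfigTest(unittest.TestCase):
    def setUp(self):
        # Extra kwargs become inputs_dict and are forwarded to every BertConfig(...) call.
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

run_common_tests then walks through the common-property, serialization, save/load and num_labels checks defined above.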
6,493
41.168831
116
py
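create_and_test_config_with_num_labels in the file above relies on PretrainedConfig keeping num_labels, id2label and label2id in sync through the num_labels setter. A quick standalone check of that behaviour; BertConfig is just a convenient concrete config:

from transformers import BertConfig

config = BertConfig(num_labels=5)
assert len(config.id2label) == len(config.label2id) == 5

config.num_labels = 3  # the setter rebuilds both mappings
assert sorted(config.id2label) == [0, 1, 2]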
transformers
transformers-main/tests/test_pipeline_mixin.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import json import os import random import unittest from pathlib import Path from transformers.testing_utils import ( is_pipeline_test, require_decord, require_pytesseract, require_timm, require_torch, require_torch_or_tf, require_vision, ) from transformers.utils import direct_transformers_import, logging from .pipelines.test_pipelines_audio_classification import AudioClassificationPipelineTests from .pipelines.test_pipelines_automatic_speech_recognition import AutomaticSpeechRecognitionPipelineTests from .pipelines.test_pipelines_conversational import ConversationalPipelineTests from .pipelines.test_pipelines_depth_estimation import DepthEstimationPipelineTests from .pipelines.test_pipelines_document_question_answering import DocumentQuestionAnsweringPipelineTests from .pipelines.test_pipelines_feature_extraction import FeatureExtractionPipelineTests from .pipelines.test_pipelines_fill_mask import FillMaskPipelineTests from .pipelines.test_pipelines_image_classification import ImageClassificationPipelineTests from .pipelines.test_pipelines_image_segmentation import ImageSegmentationPipelineTests from .pipelines.test_pipelines_image_to_text import ImageToTextPipelineTests from .pipelines.test_pipelines_mask_generation import MaskGenerationPipelineTests from .pipelines.test_pipelines_object_detection import ObjectDetectionPipelineTests from .pipelines.test_pipelines_question_answering import QAPipelineTests from .pipelines.test_pipelines_summarization import SummarizationPipelineTests from .pipelines.test_pipelines_table_question_answering import TQAPipelineTests from .pipelines.test_pipelines_text2text_generation import Text2TextGenerationPipelineTests from .pipelines.test_pipelines_text_classification import TextClassificationPipelineTests from .pipelines.test_pipelines_text_generation import TextGenerationPipelineTests from .pipelines.test_pipelines_token_classification import TokenClassificationPipelineTests from .pipelines.test_pipelines_translation import TranslationPipelineTests from .pipelines.test_pipelines_video_classification import VideoClassificationPipelineTests from .pipelines.test_pipelines_visual_question_answering import VisualQuestionAnsweringPipelineTests from .pipelines.test_pipelines_zero_shot import ZeroShotClassificationPipelineTests from .pipelines.test_pipelines_zero_shot_audio_classification import ZeroShotAudioClassificationPipelineTests from .pipelines.test_pipelines_zero_shot_image_classification import ZeroShotImageClassificationPipelineTests from .pipelines.test_pipelines_zero_shot_object_detection import ZeroShotObjectDetectionPipelineTests pipeline_test_mapping = { "audio-classification": {"test": AudioClassificationPipelineTests}, "automatic-speech-recognition": {"test": AutomaticSpeechRecognitionPipelineTests}, "conversational": {"test": ConversationalPipelineTests}, "depth-estimation": {"test": DepthEstimationPipelineTests}, 
"document-question-answering": {"test": DocumentQuestionAnsweringPipelineTests}, "feature-extraction": {"test": FeatureExtractionPipelineTests}, "fill-mask": {"test": FillMaskPipelineTests}, "image-classification": {"test": ImageClassificationPipelineTests}, "image-segmentation": {"test": ImageSegmentationPipelineTests}, "image-to-text": {"test": ImageToTextPipelineTests}, "mask-generation": {"test": MaskGenerationPipelineTests}, "object-detection": {"test": ObjectDetectionPipelineTests}, "question-answering": {"test": QAPipelineTests}, "summarization": {"test": SummarizationPipelineTests}, "table-question-answering": {"test": TQAPipelineTests}, "text2text-generation": {"test": Text2TextGenerationPipelineTests}, "text-classification": {"test": TextClassificationPipelineTests}, "text-generation": {"test": TextGenerationPipelineTests}, "token-classification": {"test": TokenClassificationPipelineTests}, "translation": {"test": TranslationPipelineTests}, "video-classification": {"test": VideoClassificationPipelineTests}, "visual-question-answering": {"test": VisualQuestionAnsweringPipelineTests}, "zero-shot": {"test": ZeroShotClassificationPipelineTests}, "zero-shot-audio-classification": {"test": ZeroShotAudioClassificationPipelineTests}, "zero-shot-image-classification": {"test": ZeroShotImageClassificationPipelineTests}, "zero-shot-object-detection": {"test": ZeroShotObjectDetectionPipelineTests}, } for task, task_info in pipeline_test_mapping.items(): test = task_info["test"] task_info["mapping"] = { "pt": getattr(test, "model_mapping", None), "tf": getattr(test, "tf_model_mapping", None), } # The default value `hf-internal-testing` is for running the pipeline testing against the tiny models on the Hub. # For debugging purpose, we can specify a local path which is the `output_path` argument of a previous run of # `utils/create_dummy_models.py`. TRANSFORMERS_TINY_MODEL_PATH = os.environ.get("TRANSFORMERS_TINY_MODEL_PATH", "hf-internal-testing") if TRANSFORMERS_TINY_MODEL_PATH == "hf-internal-testing": TINY_MODEL_SUMMARY_FILE_PATH = os.path.join(Path(__file__).parent.parent, "tests/utils/tiny_model_summary.json") else: TINY_MODEL_SUMMARY_FILE_PATH = os.path.join(TRANSFORMERS_TINY_MODEL_PATH, "reports", "tiny_model_summary.json") with open(TINY_MODEL_SUMMARY_FILE_PATH) as fp: tiny_model_summary = json.load(fp) PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent, "src/transformers") # Dynamically import the Transformers module to grab the attribute classes of the processor form their names. transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS) logger = logging.get_logger(__name__) class PipelineTesterMixin: model_tester = None pipeline_model_mapping = None supported_frameworks = ["pt", "tf"] def run_task_tests(self, task): """Run pipeline tests for a specific `task` Args: task (`str`): A task name. This should be a key in the mapping `pipeline_test_mapping`. """ if task not in self.pipeline_model_mapping: self.skipTest( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: `{task}` is not in " f"`self.pipeline_model_mapping` for `{self.__class__.__name__}`." ) model_architectures = self.pipeline_model_mapping[task] if not isinstance(model_architectures, tuple): model_architectures = (model_architectures,) if not isinstance(model_architectures, tuple): raise ValueError(f"`model_architectures` must be a tuple. 
Got {type(model_architectures)} instead.") for model_architecture in model_architectures: model_arch_name = model_architecture.__name__ # Get the canonical name for _prefix in ["Flax", "TF"]: if model_arch_name.startswith(_prefix): model_arch_name = model_arch_name[len(_prefix) :] break tokenizer_names = [] processor_names = [] commit = None if model_arch_name in tiny_model_summary: tokenizer_names = tiny_model_summary[model_arch_name]["tokenizer_classes"] processor_names = tiny_model_summary[model_arch_name]["processor_classes"] if "sha" in tiny_model_summary[model_arch_name]: commit = tiny_model_summary[model_arch_name]["sha"] # Adding `None` (if empty) so we can generate tests tokenizer_names = [None] if len(tokenizer_names) == 0 else tokenizer_names processor_names = [None] if len(processor_names) == 0 else processor_names repo_name = f"tiny-random-{model_arch_name}" if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing": repo_name = model_arch_name self.run_model_pipeline_tests( task, repo_name, model_architecture, tokenizer_names, processor_names, commit ) def run_model_pipeline_tests(self, task, repo_name, model_architecture, tokenizer_names, processor_names, commit): """Run pipeline tests for a specific `task` with the give model class and tokenizer/processor class names Args: task (`str`): A task name. This should be a key in the mapping `pipeline_test_mapping`. repo_name (`str`): A model repository id on the Hub. model_architecture (`type`): A subclass of `PretrainedModel` or `PretrainedModel`. tokenizer_names (`List[str]`): A list of names of a subclasses of `PreTrainedTokenizerFast` or `PreTrainedTokenizer`. processor_names (`List[str]`): A list of names of subclasses of `BaseImageProcessor` or `FeatureExtractionMixin`. """ # Get an instance of the corresponding class `XXXPipelineTests` in order to use `get_test_pipeline` and # `run_pipeline_test`. pipeline_test_class_name = pipeline_test_mapping[task]["test"].__name__ for tokenizer_name in tokenizer_names: for processor_name in processor_names: if self.is_pipeline_test_to_skip( pipeline_test_class_name, model_architecture.config_class, model_architecture, tokenizer_name, processor_name, ): logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: test is " f"currently known to fail for: model `{model_architecture.__name__}` | tokenizer " f"`{tokenizer_name}` | processor `{processor_name}`." ) continue self.run_pipeline_test(task, repo_name, model_architecture, tokenizer_name, processor_name, commit) def run_pipeline_test(self, task, repo_name, model_architecture, tokenizer_name, processor_name, commit): """Run pipeline tests for a specific `task` with the give model class and tokenizer/processor class name The model will be loaded from a model repository on the Hub. Args: task (`str`): A task name. This should be a key in the mapping `pipeline_test_mapping`. repo_name (`str`): A model repository id on the Hub. model_architecture (`type`): A subclass of `PretrainedModel` or `PretrainedModel`. tokenizer_name (`str`): The name of a subclass of `PreTrainedTokenizerFast` or `PreTrainedTokenizer`. processor_name (`str`): The name of a subclass of `BaseImageProcessor` or `FeatureExtractionMixin`. 
""" repo_id = f"{TRANSFORMERS_TINY_MODEL_PATH}/{repo_name}" if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing": model_type = model_architecture.config_class.model_type repo_id = os.path.join(TRANSFORMERS_TINY_MODEL_PATH, model_type, repo_name) tokenizer = None if tokenizer_name is not None: tokenizer_class = getattr(transformers_module, tokenizer_name) tokenizer = tokenizer_class.from_pretrained(repo_id, revision=commit) processor = None if processor_name is not None: processor_class = getattr(transformers_module, processor_name) # If the required packages (like `Pillow` or `torchaudio`) are not installed, this will fail. try: processor = processor_class.from_pretrained(repo_id, revision=commit) except Exception: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not load the " f"processor from `{repo_id}` with `{processor_name}`." ) return # TODO: Maybe not upload such problematic tiny models to Hub. if tokenizer is None and processor is None: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not find or load " f"any tokenizer / processor from `{repo_id}`." ) return # TODO: We should check if a model file is on the Hub repo. instead. try: model = model_architecture.from_pretrained(repo_id, revision=commit) except Exception: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not find or load " f"the model from `{repo_id}` with `{model_architecture}`." ) return pipeline_test_class_name = pipeline_test_mapping[task]["test"].__name__ if self.is_pipeline_test_to_skip_more(pipeline_test_class_name, model.config, model, tokenizer, processor): logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: test is " f"currently known to fail for: model `{model_architecture.__name__}` | tokenizer " f"`{tokenizer_name}` | processor `{processor_name}`." ) return # validate validate_test_components(self, task, model, tokenizer, processor) if hasattr(model, "eval"): model = model.eval() # Get an instance of the corresponding class `XXXPipelineTests` in order to use `get_test_pipeline` and # `run_pipeline_test`. task_test = pipeline_test_mapping[task]["test"]() pipeline, examples = task_test.get_test_pipeline(model, tokenizer, processor) if pipeline is None: # The test can disable itself, but it should be very marginal # Concerns: Wav2Vec2ForCTC without tokenizer test (FastTokenizer don't exist) logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not get the " "pipeline for testing." 
) return task_test.run_pipeline_test(pipeline, examples) def run_batch_test(pipeline, examples): # Need to copy because `Conversation` are stateful if pipeline.tokenizer is not None and pipeline.tokenizer.pad_token_id is None: return # No batching for this and it's OK # 10 examples with batch size 4 means there needs to be a unfinished batch # which is important for the unbatcher def data(n): for _ in range(n): # Need to copy because Conversation object is mutated yield copy.deepcopy(random.choice(examples)) out = [] for item in pipeline(data(10), batch_size=4): out.append(item) self.assertEqual(len(out), 10) run_batch_test(pipeline, examples) @is_pipeline_test def test_pipeline_audio_classification(self): self.run_task_tests(task="audio-classification") @is_pipeline_test def test_pipeline_automatic_speech_recognition(self): self.run_task_tests(task="automatic-speech-recognition") @is_pipeline_test def test_pipeline_conversational(self): self.run_task_tests(task="conversational") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_depth_estimation(self): self.run_task_tests(task="depth-estimation") @is_pipeline_test @require_pytesseract @require_torch @require_vision def test_pipeline_document_question_answering(self): self.run_task_tests(task="document-question-answering") @is_pipeline_test def test_pipeline_feature_extraction(self): self.run_task_tests(task="feature-extraction") @is_pipeline_test def test_pipeline_fill_mask(self): self.run_task_tests(task="fill-mask") @is_pipeline_test @require_torch_or_tf @require_vision def test_pipeline_image_classification(self): self.run_task_tests(task="image-classification") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_image_segmentation(self): self.run_task_tests(task="image-segmentation") @is_pipeline_test @require_vision def test_pipeline_image_to_text(self): self.run_task_tests(task="image-to-text") @unittest.skip(reason="`run_pipeline_test` is currently not implemented.") @is_pipeline_test @require_vision @require_torch def test_pipeline_mask_generation(self): self.run_task_tests(task="mask-generation") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_object_detection(self): self.run_task_tests(task="object-detection") @is_pipeline_test def test_pipeline_question_answering(self): self.run_task_tests(task="question-answering") @is_pipeline_test def test_pipeline_summarization(self): self.run_task_tests(task="summarization") @is_pipeline_test def test_pipeline_table_question_answering(self): self.run_task_tests(task="table-question-answering") @is_pipeline_test def test_pipeline_text2text_generation(self): self.run_task_tests(task="text2text-generation") @is_pipeline_test def test_pipeline_text_classification(self): self.run_task_tests(task="text-classification") @is_pipeline_test @require_torch_or_tf def test_pipeline_text_generation(self): self.run_task_tests(task="text-generation") @is_pipeline_test def test_pipeline_token_classification(self): self.run_task_tests(task="token-classification") @is_pipeline_test def test_pipeline_translation(self): self.run_task_tests(task="translation") @is_pipeline_test @require_torch_or_tf @require_vision @require_decord def test_pipeline_video_classification(self): self.run_task_tests(task="video-classification") @is_pipeline_test @require_torch @require_vision def test_pipeline_visual_question_answering(self): self.run_task_tests(task="visual-question-answering") @is_pipeline_test def 
test_pipeline_zero_shot(self): self.run_task_tests(task="zero-shot") @is_pipeline_test @require_torch def test_pipeline_zero_shot_audio_classification(self): self.run_task_tests(task="zero-shot-audio-classification") @is_pipeline_test @require_vision def test_pipeline_zero_shot_image_classification(self): self.run_task_tests(task="zero-shot-image-classification") @is_pipeline_test @require_vision @require_torch def test_pipeline_zero_shot_object_detection(self): self.run_task_tests(task="zero-shot-object-detection") # This contains the test cases to be skipped without model architecture being involved. def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): """Skip some tests based on the classes or their names without the instantiated objects. This is to avoid calling `from_pretrained` (so reducing the runtime) if we already know the tests will fail. """ # No fix is required for this case. if ( pipeline_test_casse_name == "DocumentQuestionAnsweringPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `DocumentQuestionAnsweringPipelineTests` requires a fast tokenizer. return True return False def is_pipeline_test_to_skip_more(self, pipeline_test_casse_name, config, model, tokenizer, processor): # noqa """Skip some more tests based on the information from the instantiated objects.""" # No fix is required for this case. if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer is not None and getattr(tokenizer, "pad_token", None) is None and not tokenizer.__class__.__name__.endswith("Fast") ): # `QAPipelineTests` doesn't work with a slow tokenizer that has no pad token. return True return False def validate_test_components(test_case, task, model, tokenizer, processor): # TODO: Move this to tiny model creation script # head-specific (within a model type) necessary changes to the config # 1. for `BlenderbotForCausalLM` if model.__class__.__name__ == "BlenderbotForCausalLM": model.config.encoder_no_repeat_ngram_size = 0 # TODO: Change the tiny model creation script: don't create models with problematic tokenizers # Avoid `IndexError` in embedding layers CONFIG_WITHOUT_VOCAB_SIZE = ["CanineConfig"] if tokenizer is not None: config_vocab_size = getattr(model.config, "vocab_size", None) # For CLIP-like models if config_vocab_size is None and hasattr(model.config, "text_config"): config_vocab_size = getattr(model.config.text_config, "vocab_size", None) if config_vocab_size is None and model.config.__class__.__name__ not in CONFIG_WITHOUT_VOCAB_SIZE: raise ValueError( "Could not determine `vocab_size` from model configuration while `tokenizer` is not `None`." )
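PipelineTesterMixin above expects the concrete model test class to supply pipeline_model_mapping, which maps each supported pipeline task to the architecture that the tiny Hub checkpoint should be loaded into; the test_pipeline_* methods generated by the mixin then call run_task_tests for each task. A minimal sketch of such a subclass; the Bert classes and the relative import path are illustrative:

import unittest

from transformers import BertForMaskedLM, BertModel

from .test_pipeline_mixin import PipelineTesterMixin


class BertModelPipelineTest(PipelineTesterMixin, unittest.TestCase):
    # Each entry tells the mixin which architecture to load the tiny Hub checkpoint into
    # for that pipeline task; tasks missing from the mapping are skipped, not failed.
    pipeline_model_mapping = {
        "feature-extraction": BertModel,
        "fill-mask": BertForMaskedLM,
    }

Because every generated test is decorated with @is_pipeline_test, these tests only run in the dedicated pipeline CI job rather than in the default test runs.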
22,373
43.569721
120
py
transformers
transformers-main/tests/test_tokenization_common.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import itertools import json import os import pickle import re import shutil import tempfile import traceback import unittest from collections import OrderedDict from itertools import takewhile from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Union from parameterized import parameterized from transformers import ( AlbertTokenizer, AlbertTokenizerFast, BertTokenizer, BertTokenizerFast, PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast, SpecialTokensMixin, Trainer, TrainingArguments, is_flax_available, is_tf_available, is_torch_available, logging, ) from transformers.testing_utils import ( check_json_file_has_correct_format, get_tests_dir, is_pt_tf_cross_test, require_tf, require_tokenizers, require_torch, run_test_in_subprocess, slow, ) from transformers.tokenization_utils import AddedToken if is_torch_available(): import torch.nn as nn if TYPE_CHECKING: from transformers import PretrainedConfig, PreTrainedModel, TFPreTrainedModel logger = logging.get_logger(__name__) NON_ENGLISH_TAGS = ["chinese", "dutch", "french", "finnish", "german", "multilingual"] SMALL_TRAINING_CORPUS = [ ["This is the first sentence.", "This is the second one."], ["This sentence (contains #) over symbols and numbers 12 3.", "But not this one."], ] def filter_non_english(_, pretrained_name: str): """Filter all the model for non-english language""" return not any(lang in pretrained_name for lang in NON_ENGLISH_TAGS) def filter_roberta_detectors(_, pretrained_name: str): return "detector" not in pretrained_name def merge_model_tokenizer_mappings( model_mapping: Dict["PretrainedConfig", Union["PreTrainedModel", "TFPreTrainedModel"]], tokenizer_mapping: Dict["PretrainedConfig", Tuple["PreTrainedTokenizer", "PreTrainedTokenizerFast"]], ) -> Dict[ Union["PreTrainedTokenizer", "PreTrainedTokenizerFast"], Tuple["PretrainedConfig", Union["PreTrainedModel", "TFPreTrainedModel"]], ]: configurations = list(model_mapping.keys()) model_tokenizer_mapping = OrderedDict([]) for configuration in configurations: if configuration in model_mapping and configuration in tokenizer_mapping: model = model_mapping[configuration] tokenizer = tokenizer_mapping[configuration][0] tokenizer_fast = tokenizer_mapping[configuration][1] if tokenizer is not None: if configuration.__name__.startswith(tokenizer.__name__.replace("Tokenizer", "")): model_tokenizer_mapping.update({tokenizer: (configuration, model)}) if tokenizer_fast is not None: if configuration.__name__.startswith(tokenizer_fast.__name__.replace("TokenizerFast", "")): model_tokenizer_mapping.update({tokenizer_fast: (configuration, model)}) return model_tokenizer_mapping def _test_subword_regularization_tokenizer(in_queue, out_queue, timeout): error = None try: inputs = in_queue.get(timeout=timeout) tokenizer = inputs["tokenizer"] sp_model_kwargs = inputs["sp_model_kwargs"] test_sentencepiece_ignore_case = inputs["test_sentencepiece_ignore_case"] 
unittest.TestCase().assertTrue(hasattr(tokenizer, "sp_model_kwargs")) unittest.TestCase().assertIsNotNone(tokenizer.sp_model_kwargs) unittest.TestCase().assertTrue(isinstance(tokenizer.sp_model_kwargs, dict)) unittest.TestCase().assertDictEqual(tokenizer.sp_model_kwargs, sp_model_kwargs) check_subword_sampling(tokenizer, test_sentencepiece_ignore_case=test_sentencepiece_ignore_case) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def check_subword_sampling( tokenizer: PreTrainedTokenizer, text: str = None, test_sentencepiece_ignore_case: bool = True, ) -> None: """ Check if the tokenizer generates different results when subword regularization is enabled. Subword regularization augments training data with subword sampling. This has a random component. Args: tokenizer: The tokenizer to check. text: The text to use for the checks. test_sentencepiece_ignore_case: See `TokenizerTesterMixin.test_sentencepiece_ignore_case`. """ text = "This is a test for subword regularization." if text is None else text if test_sentencepiece_ignore_case: text = text.lower() tokens_list = [] for _ in range(5): tokens_list.append(tokenizer.tokenize(text)) # the list of different pairs of tokens_list combinations = itertools.combinations(tokens_list, 2) # check of sampling is done subword_sampling_found = False for combination in combinations: if combination[0] != combination[1]: subword_sampling_found = True unittest.TestCase().assertTrue(subword_sampling_found) # check if converting back to original text works for tokens in tokens_list: if test_sentencepiece_ignore_case: unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens).lower()) else: unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens)) class TokenizerTesterMixin: tokenizer_class = None rust_tokenizer_class = None test_slow_tokenizer = True test_rust_tokenizer = True space_between_special_tokens = False from_pretrained_kwargs = None from_pretrained_filter = None from_pretrained_vocab_key = "vocab_file" test_seq2seq = True # set to True to test a sentencepiece tokenizer test_sentencepiece = False # set to True to ignore casing when testing a sentencepiece tokenizer # test_sentencepiece must also be set to True test_sentencepiece_ignore_case = False def setUp(self) -> None: # Tokenizer.filter makes it possible to filter which Tokenizer to case based on all the # information available in Tokenizer (name, rust class, python class, vocab key name) if self.test_rust_tokenizer: tokenizers_list = [ ( self.rust_tokenizer_class, pretrained_name, self.from_pretrained_kwargs if self.from_pretrained_kwargs is not None else {}, ) for pretrained_name in self.rust_tokenizer_class.pretrained_vocab_files_map[ self.from_pretrained_vocab_key ].keys() if self.from_pretrained_filter is None or (self.from_pretrained_filter is not None and self.from_pretrained_filter(pretrained_name)) ] self.tokenizers_list = tokenizers_list[:1] # Let's just test the first pretrained vocab for speed else: self.tokenizers_list = [] with open(f"{get_tests_dir()}/fixtures/sample_text.txt", encoding="utf-8") as f_data: self._data = f_data.read().replace("\n\n", "\n").strip() self.tmpdirname = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tmpdirname) def get_input_output_texts(self, tokenizer): input_txt = self.get_clean_sequence(tokenizer)[0] return input_txt, input_txt def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, 
min_length=5) -> Tuple[str, list]: toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))] toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks)) toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks)) if max_length is not None and len(toks) > max_length: toks = toks[:max_length] if min_length is not None and len(toks) < min_length and len(toks) > 0: while len(toks) < min_length: toks = toks + toks # toks_str = [t[1] for t in toks] toks_ids = [t[0] for t in toks] # Ensure consistency output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) if " " not in output_txt and len(toks_ids) > 1: output_txt = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) ) if with_prefix_space: output_txt = " " + output_txt output_ids = tokenizer.encode(output_txt, add_special_tokens=False) return output_txt, output_ids def get_tokenizers(self, fast=True, **kwargs) -> List[PreTrainedTokenizerBase]: if fast and self.test_rust_tokenizer and self.test_slow_tokenizer: return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)] elif fast and self.test_rust_tokenizer: return [self.get_rust_tokenizer(**kwargs)] elif self.test_slow_tokenizer: return [self.get_tokenizer(**kwargs)] else: raise ValueError("This tokenizer class has no tokenizer to be tested.") def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast: return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def tokenizer_integration_test_util( self, expected_encoding: Dict, model_name: str, revision: str = None, sequences: List[str] = None, decode_kwargs: Dict[str, Any] = None, padding: bool = True, ): """ Util for integration test. Text is tokenized and then reverted back to text. Both results are then checked. Args: expected_encoding: The expected result of the tokenizer output. model_name: The model name of the tokenizer to load and use. revision: The full git revision number of the model. This is to pin the tokenizer config and to avoid that tests start to fail if the config gets changed upstream. sequences: Can overwrite the texts that are used to check the tokenizer. This is useful if the tokenizer supports non english languages like france. decode_kwargs: Additional args for the ``decode`` function which reverts the tokenized text back to a string. padding: Activates and controls padding of the tokenizer. """ decode_kwargs = {} if decode_kwargs is None else decode_kwargs if sequences is None: sequences = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over 32+ pretrained " "models in 100+ languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] if self.test_sentencepiece_ignore_case: sequences = [sequence.lower() for sequence in sequences] tokenizer_classes = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class) for tokenizer_class in tokenizer_classes: tokenizer = tokenizer_class.from_pretrained( model_name, revision=revision, # to pin the tokenizer version ) encoding = tokenizer(sequences, padding=padding) decoded_sequences = [ tokenizer.decode(seq, skip_special_tokens=True, **decode_kwargs) for seq in encoding["input_ids"] ] encoding_data = encoding.data self.assertDictEqual(encoding_data, expected_encoding) for expected, decoded in zip(sequences, decoded_sequences): if self.test_sentencepiece_ignore_case: expected = expected.lower() self.assertEqual(expected, decoded) def assert_padded_input_match(self, input_r: list, input_p: list, max_length: int, pad_token_id: int): # Ensure we match max_length self.assertEqual(len(input_r), max_length) self.assertEqual(len(input_p), max_length) # Ensure the number of padded tokens is the same padded_tokens_r = list(takewhile(lambda i: i == pad_token_id, reversed(input_r))) padded_tokens_p = list(takewhile(lambda i: i == pad_token_id, reversed(input_p))) self.assertSequenceEqual(padded_tokens_r, padded_tokens_p) def assert_batch_padded_input_match( self, input_r: dict, input_p: dict, max_length: int, pad_token_id: int, model_main_input_name: str = "input_ids", ): for i_r in input_r.values(): self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual( len(i_r[1]), max_length ) self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual( len(i_r[1]), max_length ) for i_r, i_p in zip(input_r[model_main_input_name], input_p[model_main_input_name]): self.assert_padded_input_match(i_r, i_p, max_length, pad_token_id) for i_r, i_p in zip(input_r["attention_mask"], input_p["attention_mask"]): self.assertSequenceEqual(i_r, i_p) @staticmethod def convert_batch_encode_plus_format_to_encode_plus(batch_encode_plus_sequences): # Switch from batch_encode_plus format: {'input_ids': [[...], [...]], ...} # to the list of examples/ encode_plus format: [{'input_ids': [...], ...}, {'input_ids': [...], ...}] return [ {value: batch_encode_plus_sequences[value][i] for value in batch_encode_plus_sequences.keys()} for i in range(len(batch_encode_plus_sequences["input_ids"])) ] # TODO: this test can be combined with `test_sentencepiece_tokenize_and_convert_tokens_to_string` after the latter is extended to all tokenizers. def test_tokenize_special_tokens(self): """Test `tokenize` with special tokens.""" tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): SPECIAL_TOKEN_1 = "[SPECIAL_TOKEN_1]" SPECIAL_TOKEN_2 = "[SPECIAL_TOKEN_2]" # TODO: # Can we combine `unique_no_split_tokens` and `all_special_tokens`(and properties related to it) # with one variable(property) for a better maintainability? # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. 
(in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]}) token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1) token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2) self.assertEqual(len(token_1), 1) self.assertEqual(len(token_2), 1) self.assertEqual(token_1[0], SPECIAL_TOKEN_1) self.assertEqual(token_2[0], SPECIAL_TOKEN_2) # TODO: this test could be extended to all tokenizers - not just the sentencepiece def test_sentencepiece_tokenize_and_convert_tokens_to_string(self): """Test ``_tokenize`` and ``convert_tokens_to_string``.""" if not self.test_sentencepiece: return tokenizer = self.get_tokenizer() text = "This is text to test the tokenizer." if self.test_sentencepiece_ignore_case: text = text.lower() tokens = tokenizer.tokenize(text) self.assertTrue(len(tokens) > 0) # check if converting back to original text works reverse_text = tokenizer.convert_tokens_to_string(tokens) if self.test_sentencepiece_ignore_case: reverse_text = reverse_text.lower() self.assertEqual(reverse_text, text) special_tokens = tokenizer.all_special_tokens special_tokens_string = tokenizer.convert_tokens_to_string(special_tokens) for special_token in special_tokens: self.assertIn(special_token, special_tokens_string) if self.test_rust_tokenizer: rust_tokenizer = self.get_rust_tokenizer() special_tokens_string_rust = rust_tokenizer.convert_tokens_to_string(special_tokens) self.assertEqual(special_tokens_string, special_tokens_string_rust) def test_sentencepiece_tokenize_and_decode(self): if not self.test_sentencepiece: return text = "This is text to test the tokenizer." if self.test_rust_tokenizer: tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() slow_ids = tokenizer(text).input_ids fast_ids = rust_tokenizer(text).input_ids self.assertEqual(slow_ids, fast_ids) slow_decoded = tokenizer.decode(slow_ids) fast_decoded = rust_tokenizer.decode(slow_ids) self.assertEqual(slow_decoded, fast_decoded) def test_subword_regularization_tokenizer(self) -> None: if not self.test_sentencepiece: return # Subword regularization is only available for the slow tokenizer. sp_model_kwargs = {"enable_sampling": True, "alpha": 0.1, "nbest_size": -1} tokenizer = self.get_tokenizer(sp_model_kwargs=sp_model_kwargs) run_test_in_subprocess( test_case=self, target_func=_test_subword_regularization_tokenizer, inputs={ "tokenizer": tokenizer, "sp_model_kwargs": sp_model_kwargs, "test_sentencepiece_ignore_case": self.test_sentencepiece_ignore_case, }, ) def test_pickle_subword_regularization_tokenizer(self) -> None: if not self.test_sentencepiece: return """Google pickle __getstate__ __setstate__ if you are struggling with this.""" # Subword regularization is only available for the slow tokenizer. 
sp_model_kwargs = {"enable_sampling": True, "alpha": 0.1, "nbest_size": -1} tokenizer = self.get_tokenizer(sp_model_kwargs=sp_model_kwargs) tokenizer_bin = pickle.dumps(tokenizer) del tokenizer tokenizer_new = pickle.loads(tokenizer_bin) run_test_in_subprocess( test_case=self, target_func=_test_subword_regularization_tokenizer, inputs={ "tokenizer": tokenizer_new, "sp_model_kwargs": sp_model_kwargs, "test_sentencepiece_ignore_case": self.test_sentencepiece_ignore_case, }, ) def test_save_sentencepiece_tokenizer(self) -> None: if not self.test_sentencepiece or not self.test_slow_tokenizer: return # We want to verify that we will be able to save the tokenizer even if the original files that were used to # build the tokenizer have been deleted in the meantime. text = "This is text to test the tokenizer." tokenizer_slow_1 = self.get_tokenizer() encoding_tokenizer_slow_1 = tokenizer_slow_1(text) tmpdirname_1 = tempfile.mkdtemp() tmpdirname_2 = tempfile.mkdtemp() tokenizer_slow_1.save_pretrained(tmpdirname_1) tokenizer_slow_2 = self.tokenizer_class.from_pretrained(tmpdirname_1) encoding_tokenizer_slow_2 = tokenizer_slow_2(text) shutil.rmtree(tmpdirname_1) tokenizer_slow_2.save_pretrained(tmpdirname_2) tokenizer_slow_3 = self.tokenizer_class.from_pretrained(tmpdirname_2) encoding_tokenizer_slow_3 = tokenizer_slow_3(text) shutil.rmtree(tmpdirname_2) self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_2) self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_3) def test_model_input_names_signature(self): accepted_model_main_input_names = [ "input_ids", # nlp models "input_values", # speech models ] tokenizers = self.get_tokenizers() for tokenizer in tokenizers: # first name of model_input_names has to correspond to main model input name # to make sure `tokenizer.pad(...)` works correctly self.assertTrue(tokenizer.model_input_names[0] in accepted_model_main_input_names) def test_rust_tokenizer_signature(self): if not self.test_rust_tokenizer: return signature = inspect.signature(self.rust_tokenizer_class.__init__) self.assertIn("tokenizer_file", signature.parameters) self.assertIsNone(signature.parameters["tokenizer_file"].default) def test_tokenizer_slow_store_full_signature(self): if not self.test_slow_tokenizer: return signature = inspect.signature(self.tokenizer_class.__init__) tokenizer = self.get_tokenizer() for parameter_name, parameter in signature.parameters.items(): if parameter.default != inspect.Parameter.empty: self.assertIn(parameter_name, tokenizer.init_kwargs) def test_tokenizer_fast_store_full_signature(self): if not self.test_rust_tokenizer: return signature = inspect.signature(self.rust_tokenizer_class.__init__) tokenizer = self.get_rust_tokenizer() for parameter_name, parameter in signature.parameters.items(): if parameter.default != inspect.Parameter.empty and parameter_name not in [ "vocab_file", "merges_file", "tokenizer_file", ]: self.assertIn(parameter_name, tokenizer.init_kwargs) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence, _ = self.get_input_output_texts(tokenizer) # We don't have an exact equivalence on `tokenize()` between Rust and Slow # Slow tokenizer only split tokens, Rust tokenizers will replace with <unk> # tokens = tokenizer.tokenize(sequence) # rust_tokens = 
rust_tokenizer.tokenize(sequence) # self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) ids = tokenizer.encode(sequence, add_special_tokens=True) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=True) self.assertListEqual(ids, rust_ids) def test_tokenizers_common_properties(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): attributes_list = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] for attr in attributes_list: self.assertTrue(hasattr(tokenizer, attr)) self.assertTrue(hasattr(tokenizer, attr + "_id")) self.assertTrue(hasattr(tokenizer, "additional_special_tokens")) self.assertTrue(hasattr(tokenizer, "additional_special_tokens_ids")) attributes_list = [ "model_max_length", "init_inputs", "init_kwargs", ] if not isinstance(tokenizer, PreTrainedTokenizerFast): attributes_list += [ "added_tokens_encoder", "added_tokens_decoder", ] for attr in attributes_list: self.assertTrue(hasattr(tokenizer, attr)) def test_tokenizers_common_ids_setters(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): attributes_list = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] vocab = tokenizer.get_vocab() token_id_to_test_setters = next(iter(vocab.values())) token_to_test_setters = tokenizer.convert_ids_to_tokens( token_id_to_test_setters, skip_special_tokens=False ) for attr in attributes_list: setattr(tokenizer, attr + "_id", None) self.assertEqual(getattr(tokenizer, attr), None) self.assertEqual(getattr(tokenizer, attr + "_id"), None) setattr(tokenizer, attr + "_id", token_id_to_test_setters) self.assertEqual(getattr(tokenizer, attr), token_to_test_setters) self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters) setattr(tokenizer, "additional_special_tokens_ids", []) self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), []) self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), []) setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters]) self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters]) self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters]) @parameterized.expand([(True,), (False,)]) def test_tokenizers_special_tokens_properties_unset(self, verbose): tokenizers = self.get_tokenizers(verbose=verbose) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): attributes_list = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", "additional_special_tokens", ] for attr in attributes_list: setattr(tokenizer, attr, None) self.assertIsNone(getattr(tokenizer, attr)) def test_save_and_load_tokenizer(self): # safety check on max_len default value so we are sure the test works tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) # Now let's start the test tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = 
tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) before_vocab = tokenizer.get_vocab() tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) after_vocab = after_tokenizer.get_vocab() self.assertListEqual(before_tokens, after_tokens) self.assertDictEqual(before_vocab, after_vocab) shutil.rmtree(tmpdirname) tokenizers = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"]) additional_special_tokens = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens}) before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) before_vocab = tokenizer.get_vocab() tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) after_vocab = after_tokenizer.get_vocab() self.assertListEqual(before_tokens, after_tokens) self.assertDictEqual(before_vocab, after_vocab) self.assertIn("bim", after_vocab) self.assertIn("bambam", after_vocab) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(tmpdirname) # Test that we can also use the non-legacy saving format for fast tokenizers tokenizers = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: if not tokenizer.is_fast: continue with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"]) additional_special_tokens = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens}) before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) before_vocab = tokenizer.get_vocab() tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) after_vocab = after_tokenizer.get_vocab() self.assertListEqual(before_tokens, after_tokens) self.assertDictEqual(before_vocab, after_vocab) self.assertIn("bim", after_vocab) self.assertIn("bambam", after_vocab) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(tmpdirname) def test_pickle_tokenizer(self): """Google pickle __getstate__ __setstate__ if you are struggling with this.""" tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with 
self.subTest(f"{tokenizer.__class__.__name__}"): self.assertIsNotNone(tokenizer) text = "Munich and Berlin are nice cities" subwords = tokenizer.tokenize(text) filename = os.path.join(self.tmpdirname, "tokenizer.bin") with open(filename, "wb") as handle: pickle.dump(tokenizer, handle) with open(filename, "rb") as handle: tokenizer_new = pickle.load(handle) subwords_loaded = tokenizer_new.tokenize(text) self.assertListEqual(subwords, subwords_loaded) @require_tokenizers def test_pickle_added_tokens(self): tok1 = AddedToken("<s>", rstrip=True, lstrip=True, normalized=False, single_word=True) tok2 = pickle.loads(pickle.dumps(tok1)) self.assertEqual(tok1.__getstate__(), tok2.__getstate__()) def test_added_tokens_do_lower_case(self): tokenizers = self.get_tokenizers(do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if not hasattr(tokenizer, "do_lower_case") or not tokenizer.do_lower_case: continue special_token = tokenizer.all_special_tokens[0] text = special_token + " aaaaa bbbbbb low cccccccccdddddddd l " + special_token text2 = special_token + " AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l " + special_token toks_before_adding = tokenizer.tokenize(text) # toks before adding new_toks new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd", "AAAAA BBBBBB", "CCCCCCCCCDDDDDDDD"] added = tokenizer.add_tokens([AddedToken(tok, lstrip=True, rstrip=True) for tok in new_toks]) toks_after_adding = tokenizer.tokenize(text) toks_after_adding2 = tokenizer.tokenize(text2) # Rust tokenizers dont't lowercase added tokens at the time calling `tokenizer.add_tokens`, # while python tokenizers do, so new_toks 0 and 2 would be treated as the same, so do new_toks 1 and 3. self.assertIn(added, [2, 4]) self.assertListEqual(toks_after_adding, toks_after_adding2) self.assertTrue( len(toks_before_adding) > len(toks_after_adding), # toks_before_adding should be longer ) # Check that none of the special tokens are lowercased sequence_with_special_tokens = "A " + " yEs ".join(tokenizer.all_special_tokens) + " B" # Convert the tokenized list to str as some special tokens are tokenized like normal tokens # which have a prefix spacee e.g. the mask token of Albert, and cannot match the original # special tokens exactly. 
tokenized_sequence = "".join(tokenizer.tokenize(sequence_with_special_tokens)) for special_token in tokenizer.all_special_tokens: self.assertTrue(special_token in tokenized_sequence) tokenizers = self.get_tokenizers(do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if hasattr(tokenizer, "do_lower_case") and tokenizer.do_lower_case: continue special_token = tokenizer.all_special_tokens[0] text = special_token + " aaaaa bbbbbb low cccccccccdddddddd l " + special_token text2 = special_token + " AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l " + special_token toks_before_adding = tokenizer.tokenize(text) # toks before adding new_toks new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd", "AAAAA BBBBBB", "CCCCCCCCCDDDDDDDD"] added = tokenizer.add_tokens([AddedToken(tok, lstrip=True, rstrip=True) for tok in new_toks]) self.assertIn(added, [2, 4]) toks_after_adding = tokenizer.tokenize(text) toks_after_adding2 = tokenizer.tokenize(text2) self.assertEqual(len(toks_after_adding), len(toks_after_adding2)) # Length should still be the same self.assertNotEqual( toks_after_adding[1], toks_after_adding2[1] ) # But at least the first non-special tokens should differ self.assertTrue( len(toks_before_adding) > len(toks_after_adding), # toks_before_adding should be longer ) def test_add_tokens_tokenizer(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab_size = tokenizer.vocab_size all_size = len(tokenizer) self.assertNotEqual(vocab_size, 0) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"] added_toks = tokenizer.add_tokens(new_toks) vocab_size_2 = tokenizer.vocab_size all_size_2 = len(tokenizer) self.assertNotEqual(vocab_size_2, 0) self.assertEqual(vocab_size, vocab_size_2) self.assertEqual(added_toks, len(new_toks)) self.assertEqual(all_size_2, all_size + len(new_toks)) tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False) self.assertGreaterEqual(len(tokens), 4) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[-2], tokenizer.vocab_size - 1) new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} added_toks_2 = tokenizer.add_special_tokens(new_toks_2) vocab_size_3 = tokenizer.vocab_size all_size_3 = len(tokenizer) self.assertNotEqual(vocab_size_3, 0) self.assertEqual(vocab_size, vocab_size_3) self.assertEqual(added_toks_2, len(new_toks_2)) self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) tokens = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False ) self.assertGreaterEqual(len(tokens), 6) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[0], tokens[1]) self.assertGreater(tokens[-2], tokenizer.vocab_size - 1) self.assertGreater(tokens[-2], tokens[-3]) self.assertEqual(tokens[0], tokenizer.eos_token_id) self.assertEqual(tokens[-2], tokenizer.pad_token_id) def test_add_special_tokens(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): input_text, ids = self.get_clean_sequence(tokenizer) special_token = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token}) encoded_special_token = 
tokenizer.encode(special_token, add_special_tokens=False) self.assertEqual(len(encoded_special_token), 1) text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False) encoded = tokenizer.encode(text, add_special_tokens=False) input_encoded = tokenizer.encode(input_text, add_special_tokens=False) special_token_id = tokenizer.encode(special_token, add_special_tokens=False) self.assertEqual(encoded, input_encoded + special_token_id) decoded = tokenizer.decode(encoded, skip_special_tokens=True) self.assertTrue(special_token not in decoded) def test_internal_consistency(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): input_text, output_text = self.get_input_output_texts(tokenizer) tokens = tokenizer.tokenize(input_text) ids = tokenizer.convert_tokens_to_ids(tokens) ids_2 = tokenizer.encode(input_text, add_special_tokens=False) self.assertListEqual(ids, ids_2) tokens_2 = tokenizer.convert_ids_to_tokens(ids) self.assertNotEqual(len(tokens_2), 0) text_2 = tokenizer.decode(ids) self.assertIsInstance(text_2, str) self.assertEqual(text_2, output_text) @require_tokenizers def test_encode_decode_with_spaces(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): new_toks = [ AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False), AddedToken("GHI IHG", normalized=False), ] tokenizer.add_tokens(new_toks) input = "[ABC][DEF][ABC]GHI IHG[DEF]" if self.space_between_special_tokens: output = "[ABC] [DEF] [ABC] GHI IHG [DEF]" else: output = input encoded = tokenizer.encode(input, add_special_tokens=False) decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens) self.assertIn(decoded, [output, output.lower()]) def test_pretrained_model_lists(self): # We should have at least one default checkpoint for each tokenizer # We should specify the max input length as well (used in some part to list the pretrained checkpoints) self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1) self.assertEqual( len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), len(self.tokenizer_class.max_model_input_sizes), ) weights_list = list(self.tokenizer_class.max_model_input_sizes.keys()) weights_lists_2 = [] for file_id, map_list in self.tokenizer_class.pretrained_vocab_files_map.items(): weights_lists_2.append(list(map_list.keys())) for weights_list_2 in weights_lists_2: self.assertListEqual(weights_list, weights_list_2) def test_mask_output(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if ( tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer" and "token_type_ids" in tokenizer.model_input_names ): seq_0 = "Test this method." seq_1 = "With these inputs." information = tokenizer.encode_plus(seq_0, seq_1, add_special_tokens=True) sequences, mask = information["input_ids"], information["token_type_ids"] self.assertEqual(len(sequences), len(mask)) def test_token_type_ids(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0 = "Test this method." 
# We want to have sequence 0 and sequence 1 are tagged # respectively with 0 and 1 token_ids # (regardless of whether the model use token type ids) # We use this assumption in the QA pipeline among other place output = tokenizer(seq_0, return_token_type_ids=True) self.assertIn(0, output["token_type_ids"]) def test_sequence_ids(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: if not tokenizer.is_fast: continue with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0 = "Test this method." seq_1 = "With these inputs." # We want to have sequence 0 and sequence 1 are tagged # respectively with 0 and 1 token_ids # (regardless of whether the model use token type ids) # We use this assumption in the QA pipeline among other place output = tokenizer(seq_0) self.assertIn(0, output.sequence_ids()) output = tokenizer(seq_0, seq_1) self.assertIn(0, output.sequence_ids()) self.assertIn(1, output.sequence_ids()) if tokenizer.num_special_tokens_to_add(pair=True): self.assertIn(None, output.sequence_ids()) def test_number_of_added_tokens(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0 = "Test this method." seq_1 = "With these inputs." sequences = tokenizer.encode(seq_0, seq_1, add_special_tokens=False) attached_sequences = tokenizer.encode(seq_0, seq_1, add_special_tokens=True) # Method is implemented (e.g. not GPT-2) if len(attached_sequences) != 2: self.assertEqual( tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences) ) def test_maximum_encoding_length_single_input(self): tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20) sequence = tokenizer.encode(seq_0, add_special_tokens=False) total_length = len(sequence) self.assertGreater( total_length, 4, "Issue with the testing sequence, please update it, it's too short" ) # Test with max model input length model_max_length = tokenizer.model_max_length self.assertEqual(model_max_length, 100) seq_1 = seq_0 * model_max_length sequence1 = tokenizer(seq_1, add_special_tokens=False) total_length1 = len(sequence1["input_ids"]) self.assertGreater( total_length1, model_max_length, "Issue with the testing sequence, please update it, it's too short", ) # Simple padding_strategies = ( [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False] ) for padding_state in padding_strategies: with self.subTest(f"Padding: {padding_state}"): for truncation_state in [True, "longest_first", "only_first"]: with self.subTest(f"Truncation: {truncation_state}"): output = tokenizer(seq_1, padding=padding_state, truncation=truncation_state) self.assertEqual(len(output["input_ids"]), model_max_length) output = tokenizer([seq_1], padding=padding_state, truncation=truncation_state) self.assertEqual(len(output["input_ids"][0]), model_max_length) # Simple with no truncation # Reset warnings tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer(seq_1, padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) tokenizer.deprecation_warnings 
= {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer([seq_1], padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"][0]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) # Overflowing tokens stride = 2 information = tokenizer( seq_0, max_length=total_length - 2, add_special_tokens=False, stride=stride, truncation="longest_first", return_overflowing_tokens=True, # add_prefix_space=False, ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): truncated_sequence = information["input_ids"][0] overflowing_tokens = information["input_ids"][1] self.assertEqual(len(information["input_ids"]), 2) self.assertEqual(len(truncated_sequence), total_length - 2) self.assertEqual(truncated_sequence, sequence[:-2]) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :]) else: truncated_sequence = information["input_ids"] overflowing_tokens = information["overflowing_tokens"] self.assertEqual(len(truncated_sequence), total_length - 2) self.assertEqual(truncated_sequence, sequence[:-2]) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :]) def test_maximum_encoding_length_pair_input(self): tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Build a sequence from our model's vocabulary stride = 2 seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20) if len(ids) <= 2 + stride: seq_0 = (seq_0 + " ") * (2 + stride) ids = None seq0_tokens = tokenizer.encode(seq_0, add_special_tokens=False) self.assertGreater(len(seq0_tokens), 2 + stride) seq_1 = "This is another sentence to be encoded." seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False) if abs(len(seq0_tokens) - len(seq1_tokens)) <= 2: seq1_tokens = seq1_tokens + seq1_tokens seq_1 = tokenizer.decode(seq1_tokens, clean_up_tokenization_spaces=False) seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False) self.assertGreater(len(seq1_tokens), 2 + stride) smallest = seq1_tokens if len(seq0_tokens) > len(seq1_tokens) else seq0_tokens # We are not using the special tokens - a bit too hard to test all the tokenizers with this # TODO try this again later sequence = tokenizer.encode(seq_0, seq_1, add_special_tokens=False) # , add_prefix_space=False) # Test with max model input length model_max_length = tokenizer.model_max_length self.assertEqual(model_max_length, 100) seq_2 = seq_0 * model_max_length self.assertGreater(len(seq_2), model_max_length) sequence1 = tokenizer(seq_1, add_special_tokens=False) total_length1 = len(sequence1["input_ids"]) sequence2 = tokenizer(seq_2, seq_1, add_special_tokens=False) total_length2 = len(sequence2["input_ids"]) self.assertLess( total_length1, model_max_length - 10, "Issue with the testing sequence, please update it." ) self.assertGreater( total_length2, model_max_length, "Issue with the testing sequence, please update it." 
) # Simple padding_strategies = ( [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False] ) for padding_state in padding_strategies: with self.subTest(f"{tokenizer.__class__.__name__} Padding: {padding_state}"): for truncation_state in [True, "longest_first", "only_first"]: with self.subTest(f"{tokenizer.__class__.__name__} Truncation: {truncation_state}"): output = tokenizer(seq_2, seq_1, padding=padding_state, truncation=truncation_state) self.assertEqual(len(output["input_ids"]), model_max_length) output = tokenizer( [seq_2], [seq_1], padding=padding_state, truncation=truncation_state ) self.assertEqual(len(output["input_ids"][0]), model_max_length) # Simple output = tokenizer(seq_1, seq_2, padding=padding_state, truncation="only_second") self.assertEqual(len(output["input_ids"]), model_max_length) output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation="only_second") self.assertEqual(len(output["input_ids"][0]), model_max_length) # Simple with no truncation # Reset warnings tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer(seq_1, seq_2, padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"][0]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) truncated_first_sequence = tokenizer.encode(seq_0, add_special_tokens=False)[:-2] + tokenizer.encode( seq_1, add_special_tokens=False ) truncated_second_sequence = ( tokenizer.encode(seq_0, add_special_tokens=False) + tokenizer.encode(seq_1, add_special_tokens=False)[:-2] ) truncated_longest_sequence = ( truncated_first_sequence if len(seq0_tokens) > len(seq1_tokens) else truncated_second_sequence ) overflow_first_sequence = tokenizer.encode(seq_0, add_special_tokens=False)[ -(2 + stride) : ] + tokenizer.encode(seq_1, add_special_tokens=False) overflow_second_sequence = ( tokenizer.encode(seq_0, add_special_tokens=False) + tokenizer.encode(seq_1, add_special_tokens=False)[-(2 + stride) :] ) overflow_longest_sequence = ( overflow_first_sequence if len(seq0_tokens) > len(seq1_tokens) else overflow_second_sequence ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): information = tokenizer( seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=False, stride=stride, truncation="longest_first", return_overflowing_tokens=True, # add_prefix_space=False, ) truncated_sequence = information["input_ids"][0] overflowing_tokens = information["input_ids"][1] self.assertEqual(len(information["input_ids"]), 2) self.assertEqual(len(truncated_sequence), len(sequence) - 2) self.assertEqual(truncated_sequence, truncated_longest_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest)) self.assertEqual(overflowing_tokens, overflow_longest_sequence) else: # No overflowing tokens when using 'longest' in python tokenizers with 
self.assertRaises(ValueError) as context: information = tokenizer( seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=False, stride=stride, truncation="longest_first", return_overflowing_tokens=True, # add_prefix_space=False, ) self.assertTrue( context.exception.args[0].startswith( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." ) ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): information = tokenizer( seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True, # add_prefix_space=False, ) truncated_sequence = information["input_ids"][0] overflowing_tokens = information["input_ids"][1] self.assertEqual(len(information["input_ids"]), 2) self.assertEqual(len(truncated_sequence), len(sequence) - 2) self.assertEqual(truncated_sequence, truncated_longest_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest)) self.assertEqual(overflowing_tokens, overflow_longest_sequence) else: # No overflowing tokens when using 'longest' in python tokenizers with self.assertRaises(ValueError) as context: information = tokenizer( seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True, # add_prefix_space=False, ) self.assertTrue( context.exception.args[0].startswith( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." 
) ) information_first_truncated = tokenizer( seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=False, stride=stride, truncation="only_first", return_overflowing_tokens=True, # add_prefix_space=False, ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): truncated_sequence = information_first_truncated["input_ids"][0] overflowing_tokens = information_first_truncated["input_ids"][1] self.assertEqual(len(information_first_truncated["input_ids"]), 2) self.assertEqual(len(truncated_sequence), len(sequence) - 2) self.assertEqual(truncated_sequence, truncated_first_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq1_tokens)) self.assertEqual(overflowing_tokens, overflow_first_sequence) else: truncated_sequence = information_first_truncated["input_ids"] overflowing_tokens = information_first_truncated["overflowing_tokens"] self.assertEqual(len(truncated_sequence), len(sequence) - 2) self.assertEqual(truncated_sequence, truncated_first_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, seq0_tokens[-(2 + stride) :]) information_second_truncated = tokenizer( seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=False, stride=stride, truncation="only_second", return_overflowing_tokens=True, # add_prefix_space=False, ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): truncated_sequence = information_second_truncated["input_ids"][0] overflowing_tokens = information_second_truncated["input_ids"][1] self.assertEqual(len(information_second_truncated["input_ids"]), 2) self.assertEqual(len(truncated_sequence), len(sequence) - 2) self.assertEqual(truncated_sequence, truncated_second_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq0_tokens)) self.assertEqual(overflowing_tokens, overflow_second_sequence) else: truncated_sequence = information_second_truncated["input_ids"] overflowing_tokens = information_second_truncated["overflowing_tokens"] self.assertEqual(len(truncated_sequence), len(sequence) - 2) self.assertEqual(truncated_sequence, truncated_second_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, seq1_tokens[-(2 + stride) :]) # def test_encode_input_type(self): # tokenizers = self.get_tokenizers(do_lower_case=False) # for tokenizer in tokenizers: # with self.subTest(f"{tokenizer.__class__.__name__}"): # sequence = "Let's encode this sequence" # tokens = sequence.split() # tokenizer.tokenize(sequence) # # input_ids = tokenizer.convert_tokens_to_ids(tokens) # formatted_input = tokenizer.encode(sequence, add_special_tokens=True, add_prefix_space=False) # self.assertEqual( # tokenizer.encode(tokens, is_split_into_words=True, add_special_tokens=True), formatted_input # ) # # This is not supported with the Rust tokenizers # # self.assertEqual(tokenizer.encode(input_ids, add_special_tokens=True), formatted_input) # def test_swap_special_token(self): # tokenizers = self.get_tokenizers(do_lower_case=False) # for tokenizer in tokenizers: # with self.subTest(f"{tokenizer.__class__.__name__}"): # # Our mask token # mask = "<mask>" # # We take a single word in the middle of the vocabulary # all_tokens = sorted(tokenizer.get_vocab().keys()) # word = tokenizer.decode(tokenizer.encode(all_tokens[len(all_tokens)//2], add_special_tokens=False)[:1]) # sequence_0 = "Encode " + word + " sequence" # 
sequence_masked_0 = "Encode " + mask + " sequence" # sequence_1 = word + " this sequence" # sequence_masked_1 = mask + " this sequence" # # Add tokens so that masked token isn't split # # tokens = [AddedToken(t, lstrip=True, normalized=False) for t in sequence.split()] # # tokenizer.add_tokens(tokens) # tokenizer.add_special_tokens( # {"mask_token": AddedToken(mask, normalized=False)} # ) # Eat left space on Byte-level BPE tokenizers # mask_ind = tokenizer.convert_tokens_to_ids(mask) # # Test first masked sequence # encoded_0 = tokenizer.encode(sequence_0, add_special_tokens=False) # encoded_masked = tokenizer.encode(sequence_masked_0, add_special_tokens=False) # self.assertEqual(len(encoded_masked), len(encoded_0)) # mask_loc = encoded_masked.index(mask_ind) # encoded_masked[mask_loc] = encoded_0[mask_loc] # self.assertEqual(encoded_masked, encoded_0) # # Test second masked sequence # encoded_1 = tokenizer.encode(sequence_1, add_special_tokens=False) # encoded_masked = tokenizer.encode(sequence_masked_1, add_special_tokens=False) # self.assertEqual(len(encoded_masked), len(encoded_1)) # mask_loc = encoded_masked.index(mask_ind) # encoded_masked[mask_loc] = encoded_1[mask_loc] # self.assertEqual(encoded_masked, encoded_1) def test_special_tokens_mask(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequence_0 = "Encode this." # Testing single inputs encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( sequence_0, add_special_tokens=True, return_special_tokens_mask=True # , add_prefix_space=False ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]] self.assertEqual(encoded_sequence, filtered_sequence) def test_special_tokens_mask_input_pairs(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequence_0 = "Encode this." sequence_1 = "This one too please." 
encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False) encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True, # add_prefix_space=False, ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) filtered_sequence = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special) ] filtered_sequence = [x for x in filtered_sequence if x is not None] self.assertEqual(encoded_sequence, filtered_sequence) def test_padding_side_in_kwargs(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): if self.test_rust_tokenizer: tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, padding_side="left", **kwargs ) self.assertEqual(tokenizer_r.padding_side, "left") tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, padding_side="right", **kwargs ) self.assertEqual(tokenizer_r.padding_side, "right") self.assertRaises( ValueError, self.rust_tokenizer_class.from_pretrained, pretrained_name, padding_side="unauthorized", **kwargs, ) if self.test_slow_tokenizer: tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, padding_side="left", **kwargs) self.assertEqual(tokenizer_p.padding_side, "left") tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, padding_side="right", **kwargs) self.assertEqual(tokenizer_p.padding_side, "right") self.assertRaises( ValueError, self.tokenizer_class.from_pretrained, pretrained_name, padding_side="unauthorized", **kwargs, ) def test_truncation_side_in_kwargs(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): if self.test_rust_tokenizer: tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, truncation_side="left", **kwargs ) self.assertEqual(tokenizer_r.truncation_side, "left") tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, truncation_side="right", **kwargs ) self.assertEqual(tokenizer_r.truncation_side, "right") self.assertRaises( ValueError, self.rust_tokenizer_class.from_pretrained, pretrained_name, truncation_side="unauthorized", **kwargs, ) if self.test_slow_tokenizer: tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, truncation_side="left", **kwargs ) self.assertEqual(tokenizer_p.truncation_side, "left") tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, truncation_side="right", **kwargs ) self.assertEqual(tokenizer_p.truncation_side, "right") self.assertRaises( ValueError, self.tokenizer_class.from_pretrained, pretrained_name, truncation_side="unauthorized", **kwargs, ) def test_right_and_left_padding(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequence = "Sequence" padding_size = 10 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequence) padding_idx = tokenizer.pad_token_id # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "right" 
                encoded_sequence = tokenizer.encode(sequence)
                sequence_length = len(encoded_sequence)
                padded_sequence = tokenizer.encode(
                    sequence, max_length=sequence_length + padding_size, padding="max_length"
                )
                padded_sequence_length = len(padded_sequence)
                self.assertEqual(sequence_length + padding_size, padded_sequence_length)
                self.assertEqual(encoded_sequence + [padding_idx] * padding_size, padded_sequence)

                # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
                tokenizer.padding_side = "left"
                encoded_sequence = tokenizer.encode(sequence)
                sequence_length = len(encoded_sequence)
                padded_sequence = tokenizer.encode(
                    sequence, max_length=sequence_length + padding_size, padding="max_length"
                )
                padded_sequence_length = len(padded_sequence)
                self.assertEqual(sequence_length + padding_size, padded_sequence_length)
                self.assertEqual([padding_idx] * padding_size + encoded_sequence, padded_sequence)

                # RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
                encoded_sequence = tokenizer.encode(sequence)
                sequence_length = len(encoded_sequence)

                tokenizer.padding_side = "right"
                padded_sequence_right = tokenizer.encode(sequence, padding=True)
                padded_sequence_right_length = len(padded_sequence_right)
                self.assertEqual(sequence_length, padded_sequence_right_length)
                self.assertEqual(encoded_sequence, padded_sequence_right)

                tokenizer.padding_side = "left"
                padded_sequence_left = tokenizer.encode(sequence, padding="longest")
                padded_sequence_left_length = len(padded_sequence_left)
                self.assertEqual(sequence_length, padded_sequence_left_length)
                self.assertEqual(encoded_sequence, padded_sequence_left)

                tokenizer.padding_side = "right"
                padded_sequence_right = tokenizer.encode(sequence)
                padded_sequence_right_length = len(padded_sequence_right)
                self.assertEqual(sequence_length, padded_sequence_right_length)
                self.assertEqual(encoded_sequence, padded_sequence_right)

                tokenizer.padding_side = "left"
                padded_sequence_left = tokenizer.encode(sequence, padding=False)
                padded_sequence_left_length = len(padded_sequence_left)
                self.assertEqual(sequence_length, padded_sequence_left_length)
                self.assertEqual(encoded_sequence, padded_sequence_left)

    def test_right_and_left_truncation(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence = "This is a test sequence"

                # RIGHT TRUNCATION - Check that it correctly truncates when a maximum length is specified along with the truncation flag set to True
                truncation_size = 3
                tokenizer.truncation_side = "right"
                encoded_sequence = tokenizer.encode(sequence, add_special_tokens=False)
                sequence_length = len(encoded_sequence)
                # Remove EOS/BOS tokens
                truncated_sequence = tokenizer.encode(
                    sequence, max_length=sequence_length - truncation_size, truncation=True, add_special_tokens=False
                )
                truncated_sequence_length = len(truncated_sequence)
                self.assertEqual(sequence_length, truncated_sequence_length + truncation_size)
                self.assertEqual(encoded_sequence[:-truncation_size], truncated_sequence)

                # LEFT TRUNCATION - Check that it correctly truncates when a maximum length is specified along with the truncation flag set to True
                tokenizer.truncation_side = "left"
                sequence_length = len(encoded_sequence)
                truncated_sequence = tokenizer.encode(
                    sequence, max_length=sequence_length - truncation_size, truncation=True, add_special_tokens=False
                )
                truncated_sequence_length = len(truncated_sequence)
                self.assertEqual(sequence_length, truncated_sequence_length + truncation_size)
                self.assertEqual(encoded_sequence[truncation_size:], truncated_sequence)

                # RIGHT & LEFT TRUNCATION - Check that nothing is done for 'longest' and 'no_truncation'
                sequence_length = len(encoded_sequence)

                tokenizer.truncation_side = "right"
                truncated_sequence_right = tokenizer.encode(sequence, truncation=True, add_special_tokens=False)
                truncated_sequence_right_length = len(truncated_sequence_right)
                self.assertEqual(sequence_length, truncated_sequence_right_length)
                self.assertEqual(encoded_sequence, truncated_sequence_right)

                tokenizer.truncation_side = "left"
                truncated_sequence_left = tokenizer.encode(
                    sequence, truncation="longest_first", add_special_tokens=False
                )
                truncated_sequence_left_length = len(truncated_sequence_left)
                self.assertEqual(sequence_length, truncated_sequence_left_length)
                self.assertEqual(encoded_sequence, truncated_sequence_left)

                tokenizer.truncation_side = "right"
                truncated_sequence_right = tokenizer.encode(sequence, add_special_tokens=False)
                truncated_sequence_right_length = len(truncated_sequence_right)
                self.assertEqual(sequence_length, truncated_sequence_right_length)
                self.assertEqual(encoded_sequence, truncated_sequence_right)

                tokenizer.truncation_side = "left"
                truncated_sequence_left = tokenizer.encode(sequence, truncation=False, add_special_tokens=False)
                truncated_sequence_left_length = len(truncated_sequence_left)
                self.assertEqual(sequence_length, truncated_sequence_left_length)
                self.assertEqual(encoded_sequence, truncated_sequence_left)

    def test_padding_to_max_length(self):
        """We keep this test for backward compatibility but it should be removed when `pad_to_max_length` is deprecated."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence = "Sequence"
                padding_size = 10

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, sequence)

                padding_idx = tokenizer.pad_token_id

                # Check that it correctly pads when a maximum length is specified along with the padding flag set to True
                tokenizer.padding_side = "right"
                encoded_sequence = tokenizer.encode(sequence)
                sequence_length = len(encoded_sequence)
                # FIXME: the next line should be padding(max_length) to avoid warning
                padded_sequence = tokenizer.encode(
                    sequence, max_length=sequence_length + padding_size, pad_to_max_length=True
                )
                padded_sequence_length = len(padded_sequence)
                self.assertEqual(sequence_length + padding_size, padded_sequence_length)
                self.assertEqual(encoded_sequence + [padding_idx] * padding_size, padded_sequence)

                # Check that nothing is done when a maximum length is not specified
                encoded_sequence = tokenizer.encode(sequence)
                sequence_length = len(encoded_sequence)

                tokenizer.padding_side = "right"
                padded_sequence_right = tokenizer.encode(sequence, pad_to_max_length=True)
                padded_sequence_right_length = len(padded_sequence_right)
                self.assertEqual(sequence_length, padded_sequence_right_length)
                self.assertEqual(encoded_sequence, padded_sequence_right)

    def test_padding_to_multiple_of(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                if tokenizer.pad_token is None:
                    self.skipTest("No padding token.")
                else:
                    empty_tokens = tokenizer("", padding=True, pad_to_multiple_of=8)
                    normal_tokens = tokenizer("This is a sample input", padding=True, pad_to_multiple_of=8)
                    for key, value in empty_tokens.items():
                        self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") normal_tokens = tokenizer("This", pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # Should also work with truncation normal_tokens = tokenizer("This", padding=True, truncation=True, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # truncation to something which is not a multiple of pad_to_multiple_of raises an error self.assertRaises( ValueError, tokenizer.__call__, "This", padding=True, truncation=True, max_length=12, pad_to_multiple_of=8, ) def test_padding_with_attention_mask(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.pad_token is None: self.skipTest("No padding token.") if "attention_mask" not in tokenizer.model_input_names: self.skipTest("This model does not use attention mask.") features = [ {"input_ids": [1, 2, 3, 4, 5, 6], "attention_mask": [1, 1, 1, 1, 1, 0]}, {"input_ids": [1, 2, 3], "attention_mask": [1, 1, 0]}, ] padded_features = tokenizer.pad(features) if tokenizer.padding_side == "right": self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [1, 1, 0, 0, 0, 0]]) else: self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0]]) def test_encode_plus_with_padding(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequence = "Sequence" # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequence) padding_size = 10 padding_idx = tokenizer.pad_token_id token_type_padding_idx = tokenizer.pad_token_type_id encoded_sequence = tokenizer.encode_plus(sequence, return_special_tokens_mask=True) input_ids = encoded_sequence["input_ids"] special_tokens_mask = encoded_sequence["special_tokens_mask"] sequence_length = len(input_ids) # Test 'longest' and 'no_padding' don't do anything tokenizer.padding_side = "right" not_padded_sequence = tokenizer.encode_plus( sequence, padding=True, return_special_tokens_mask=True, ) not_padded_input_ids = not_padded_sequence["input_ids"] not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"] not_padded_sequence_length = len(not_padded_input_ids) self.assertEqual(sequence_length, not_padded_sequence_length) self.assertEqual(input_ids, not_padded_input_ids) self.assertEqual(special_tokens_mask, not_padded_special_tokens_mask) not_padded_sequence = tokenizer.encode_plus( sequence, padding=False, return_special_tokens_mask=True, ) not_padded_input_ids = not_padded_sequence["input_ids"] not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"] not_padded_sequence_length = len(not_padded_input_ids) self.assertEqual(sequence_length, not_padded_sequence_length) self.assertEqual(input_ids, not_padded_input_ids) self.assertEqual(special_tokens_mask, not_padded_special_tokens_mask) # Test right padding tokenizer.padding_side = "right" right_padded_sequence = tokenizer.encode_plus( sequence, max_length=sequence_length + padding_size, padding="max_length", return_special_tokens_mask=True, ) right_padded_input_ids = right_padded_sequence["input_ids"] right_padded_special_tokens_mask = 
right_padded_sequence["special_tokens_mask"] right_padded_sequence_length = len(right_padded_input_ids) self.assertEqual(sequence_length + padding_size, right_padded_sequence_length) self.assertEqual(input_ids + [padding_idx] * padding_size, right_padded_input_ids) self.assertEqual(special_tokens_mask + [1] * padding_size, right_padded_special_tokens_mask) # Test left padding tokenizer.padding_side = "left" left_padded_sequence = tokenizer.encode_plus( sequence, max_length=sequence_length + padding_size, padding="max_length", return_special_tokens_mask=True, ) left_padded_input_ids = left_padded_sequence["input_ids"] left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"] left_padded_sequence_length = len(left_padded_input_ids) self.assertEqual(sequence_length + padding_size, left_padded_sequence_length) self.assertEqual([padding_idx] * padding_size + input_ids, left_padded_input_ids) self.assertEqual([1] * padding_size + special_tokens_mask, left_padded_special_tokens_mask) if "token_type_ids" in tokenizer.model_input_names: token_type_ids = encoded_sequence["token_type_ids"] left_padded_token_type_ids = left_padded_sequence["token_type_ids"] right_padded_token_type_ids = right_padded_sequence["token_type_ids"] self.assertEqual( token_type_ids + [token_type_padding_idx] * padding_size, right_padded_token_type_ids ) self.assertEqual( [token_type_padding_idx] * padding_size + token_type_ids, left_padded_token_type_ids ) if "attention_mask" in tokenizer.model_input_names: attention_mask = encoded_sequence["attention_mask"] right_padded_attention_mask = right_padded_sequence["attention_mask"] left_padded_attention_mask = left_padded_sequence["attention_mask"] self.assertEqual(attention_mask + [0] * padding_size, right_padded_attention_mask) self.assertEqual([0] * padding_size + attention_mask, left_padded_attention_mask) def test_padding_warning_message_fast_tokenizer(self): if not self.test_rust_tokenizer: return sequence = "This is a text" tokenizer_fast = self.get_rust_tokenizer() # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer_fast, sequence) encoding_fast = tokenizer_fast(sequence) with self.assertLogs("transformers", level="WARNING") as cm: tokenizer_fast.pad(encoding_fast) self.assertEqual(len(cm.records), 1) self.assertIn( "Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to" " encode the text followed by a call to the `pad` method to get a padded encoding.", cm.records[0].message, ) if not self.test_slow_tokenizer: return tokenizer_slow = self.get_tokenizer() # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer_slow, sequence) encoding_slow = tokenizer_slow(sequence) with self.assertLogs(level="WARNING") as cm: # We want to assert there are no warnings, but the 'assertLogs' method does not support that. # Therefore, we are adding a dummy warning, and then we will assert it is the only warning. logger.warning("Dummy warning") tokenizer_slow.pad(encoding_slow) self.assertEqual(len(cm.records), 1) self.assertIn( "Dummy warning", cm.records[0].message, ) def test_separate_tokenizers(self): # This tests that tokenizers don't impact others. Unfortunately the case where it fails is when # we're loading an S3 configuration from a pre-trained identifier, and we have no way of testing those today. 
tokenizers = self.get_tokenizers(random_argument=True) new_tokenizers = self.get_tokenizers(random_argument=False) for tokenizer, new_tokenizer in zip(tokenizers, new_tokenizers): with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertTrue(tokenizer.init_kwargs["random_argument"]) self.assertTrue(tokenizer.init_kwargs["random_argument"]) self.assertFalse(new_tokenizer.init_kwargs["random_argument"]) def test_get_vocab(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab_dict = tokenizer.get_vocab() self.assertIsInstance(vocab_dict, dict) self.assertGreaterEqual(len(tokenizer), len(vocab_dict)) vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))] self.assertEqual(len(vocab), len(tokenizer)) tokenizer.add_tokens(["asdfasdfasdfasdf"]) vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))] self.assertEqual(len(vocab), len(tokenizer)) def test_conversion_reversible(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab = tokenizer.get_vocab() for word, ind in vocab.items(): if word == tokenizer.unk_token: continue self.assertEqual(tokenizer.convert_tokens_to_ids(word), ind) self.assertEqual(tokenizer.convert_ids_to_tokens(ind), word) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequences = [ "Testing batch encode plus", "Testing batch encode plus with different sequence lengths", "Testing batch encode plus with different sequence lengths correctly pads", ] # Test not batched encoded_sequences_1 = tokenizer.encode_plus(sequences[0]) encoded_sequences_2 = tokenizer(sequences[0]) self.assertEqual(encoded_sequences_1, encoded_sequences_2) # Test not batched pairs encoded_sequences_1 = tokenizer.encode_plus(sequences[0], sequences[1]) encoded_sequences_2 = tokenizer(sequences[0], sequences[1]) self.assertEqual(encoded_sequences_1, encoded_sequences_2) # Test batched encoded_sequences_1 = tokenizer.batch_encode_plus(sequences) encoded_sequences_2 = tokenizer(sequences) self.assertEqual(encoded_sequences_1, encoded_sequences_2) # Test batched pairs encoded_sequences_1 = tokenizer.batch_encode_plus(list(zip(sequences, sequences))) encoded_sequences_2 = tokenizer(sequences, sequences) self.assertEqual(encoded_sequences_1, encoded_sequences_2) def test_batch_encode_plus_batch_sequence_length(self): # Tests that all encoded values have the correct size tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequences = [ "Testing batch encode plus", "Testing batch encode plus with different sequence lengths", "Testing batch encode plus with different sequence lengths correctly pads", ] encoded_sequences = [tokenizer.encode_plus(sequence) for sequence in sequences] encoded_sequences_batch = tokenizer.batch_encode_plus(sequences, padding=False) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) maximum_length = len( max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len) ) # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequences) 
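                # Pad each sequence individually to the batch maximum and check it matches
                # batch_encode_plus with padding=True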
encoded_sequences_padded = [ tokenizer.encode_plus(sequence, max_length=maximum_length, padding="max_length") for sequence in sequences ] encoded_sequences_batch_padded = tokenizer.batch_encode_plus(sequences, padding=True) self.assertListEqual( encoded_sequences_padded, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded), ) # check 'longest' is unsensitive to a max length encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(sequences, padding=True) encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus( sequences, max_length=maximum_length + 10, padding="longest" ) for key in encoded_sequences_batch_padded_1.keys(): self.assertListEqual( encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key], ) # check 'no_padding' is unsensitive to a max length encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(sequences, padding=False) encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus( sequences, max_length=maximum_length + 10, padding=False ) for key in encoded_sequences_batch_padded_1.keys(): self.assertListEqual( encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key], ) @require_tokenizers def test_added_token_are_matched_longest_first(self): if not self.test_slow_tokenizer: self.skipTest("This test is only for slow tokenizers") return tokenizers = self.get_tokenizers(fast=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): try: tokenizer.add_tokens([AddedToken("extra_id_1")]) tokenizer.add_tokens([AddedToken("extra_id_100")]) except Exception: # Canine cannot add tokens which are not codepoints self.skipTest("Cannot add those Added tokens") # XXX: This used to split on `extra_id_1` first we're matching # longest first now. 
tokens = tokenizer.tokenize("This is some extra_id_100") self.assertIn("extra_id_100", tokens) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokenizer.add_tokens([AddedToken("extra_id_100")]) tokenizer.add_tokens([AddedToken("extra_id_1")]) tokens = tokenizer.tokenize("This is some extra_id_100") self.assertIn("extra_id_100", tokens) @require_tokenizers def test_added_token_serializable(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): new_token = AddedToken("new_token", lstrip=True) tokenizer.add_special_tokens({"additional_special_tokens": [new_token]}) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(tmp_dir_name) tokenizer.from_pretrained(tmp_dir_name) def test_batch_encode_plus_padding(self): # Test that padded sequences are equivalent between batch_encode_plus and encode_plus # Right padding tests tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequences = [ "Testing batch encode plus", "Testing batch encode plus with different sequence lengths", "Testing batch encode plus with different sequence lengths correctly pads", ] max_length = 100 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequences) encoded_sequences = [ tokenizer.encode_plus(sequence, max_length=max_length, padding="max_length") for sequence in sequences ] encoded_sequences_batch = tokenizer.batch_encode_plus( sequences, max_length=max_length, padding="max_length" ) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) # Left padding tests tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokenizer.padding_side = "left" sequences = [ "Testing batch encode plus", "Testing batch encode plus with different sequence lengths", "Testing batch encode plus with different sequence lengths correctly pads", ] max_length = 100 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequences) encoded_sequences = [ tokenizer.encode_plus(sequence, max_length=max_length, padding="max_length") for sequence in sequences ] encoded_sequences_batch = tokenizer.batch_encode_plus( sequences, max_length=max_length, padding="max_length" ) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) def test_pretokenized_inputs(self): # Test when inputs are pretokenized tokenizers = self.get_tokenizers(do_lower_case=False) # , add_prefix_space=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if hasattr(tokenizer, "add_prefix_space") and not tokenizer.add_prefix_space: continue # Prepare a sequence from our tokenizer vocabulary sequence, ids = self.get_clean_sequence(tokenizer, with_prefix_space=True, max_length=20) # sequence = " " + sequence # To be sure the byte-level tokenizers are feeling good token_sequence = sequence.split() # sequence_no_prefix_space = sequence.strip() # Test encode for pretokenized inputs output = tokenizer.encode(token_sequence, is_split_into_words=True, add_special_tokens=False) output_sequence = tokenizer.encode(sequence, add_special_tokens=False) self.assertEqual(output, output_sequence) 
output = tokenizer.encode(token_sequence, is_split_into_words=True, add_special_tokens=True) output_sequence = tokenizer.encode(sequence, add_special_tokens=True) self.assertEqual(output, output_sequence) # Test encode_plus for pretokenized inputs output = tokenizer.encode_plus(token_sequence, is_split_into_words=True, add_special_tokens=False) output_sequence = tokenizer.encode_plus(sequence, add_special_tokens=False) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) output = tokenizer.encode_plus(token_sequence, is_split_into_words=True, add_special_tokens=True) output_sequence = tokenizer.encode_plus(sequence, add_special_tokens=True) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) # Test batch_encode_plus for pretokenized inputs sequence_batch = [sequence.strip()] * 2 + [sequence.strip() + " " + sequence.strip()] token_sequence_batch = [s.split() for s in sequence_batch] sequence_batch_cleaned_up_spaces = [" " + " ".join(s) for s in token_sequence_batch] output = tokenizer.batch_encode_plus( token_sequence_batch, is_split_into_words=True, add_special_tokens=False ) output_sequence = tokenizer.batch_encode_plus( sequence_batch_cleaned_up_spaces, add_special_tokens=False ) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) output = tokenizer.batch_encode_plus( token_sequence_batch, is_split_into_words=True, add_special_tokens=True ) output_sequence = tokenizer.batch_encode_plus( sequence_batch_cleaned_up_spaces, add_special_tokens=True ) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) # Test encode for pretokenized inputs pairs output = tokenizer.encode( token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False ) output_sequence = tokenizer.encode(sequence, sequence, add_special_tokens=False) self.assertEqual(output, output_sequence) output = tokenizer.encode( token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True ) output_sequence = tokenizer.encode(sequence, sequence, add_special_tokens=True) self.assertEqual(output, output_sequence) # Test encode_plus for pretokenized inputs pairs output = tokenizer.encode_plus( token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False ) output_sequence = tokenizer.encode_plus(sequence, sequence, add_special_tokens=False) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) output = tokenizer.encode_plus( token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True ) output_sequence = tokenizer.encode_plus(sequence, sequence, add_special_tokens=True) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) # Test batch_encode_plus for pretokenized inputs pairs sequence_pair_batch = [(sequence.strip(), sequence.strip())] * 2 + [ (sequence.strip() + " " + sequence.strip(), sequence.strip()) ] token_sequence_pair_batch = [tuple(s.split() for s in pair) for pair in sequence_pair_batch] sequence_pair_batch_cleaned_up_spaces = [ tuple(" " + " ".join(s) for s in pair) for pair in token_sequence_pair_batch ] output = tokenizer.batch_encode_plus( token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=False ) output_sequence = tokenizer.batch_encode_plus( sequence_pair_batch_cleaned_up_spaces, add_special_tokens=False ) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) output = tokenizer.batch_encode_plus( token_sequence_pair_batch, is_split_into_words=True, 
add_special_tokens=True ) output_sequence = tokenizer.batch_encode_plus( sequence_pair_batch_cleaned_up_spaces, add_special_tokens=True ) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) def test_prepare_for_model(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): string_sequence = "Testing the prepare_for_model method." ids = tokenizer.encode(string_sequence, add_special_tokens=False) prepared_input_dict = tokenizer.prepare_for_model(ids, add_special_tokens=True) input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True) self.assertEqual(input_dict, prepared_input_dict) def test_batch_encode_plus_overflowing_tokens(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: string_sequences = ["Testing the prepare_for_model method.", "Test"] if tokenizer.pad_token is None: tokenizer.add_special_tokens({"pad_token": "[PAD]"}) tokenizer.batch_encode_plus( string_sequences, return_overflowing_tokens=True, truncation=True, padding=True, max_length=3 ) @is_pt_tf_cross_test def test_batch_encode_plus_tensors(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequences = [ "Testing batch encode plus", "Testing batch encode plus with different sequence lengths", "Testing batch encode plus with different sequence lengths correctly pads", ] # A Tensor cannot be build by sequences which are not the same size self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, return_tensors="pt") self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, return_tensors="tf") if tokenizer.pad_token_id is None: self.assertRaises( ValueError, tokenizer.batch_encode_plus, sequences, padding=True, return_tensors="pt", ) self.assertRaises( ValueError, tokenizer.batch_encode_plus, sequences, padding="longest", return_tensors="tf", ) else: pytorch_tensor = tokenizer.batch_encode_plus(sequences, padding=True, return_tensors="pt") tensorflow_tensor = tokenizer.batch_encode_plus(sequences, padding="longest", return_tensors="tf") encoded_sequences = tokenizer.batch_encode_plus(sequences, padding=True) for key in encoded_sequences.keys(): pytorch_value = pytorch_tensor[key].tolist() tensorflow_value = tensorflow_tensor[key].numpy().tolist() encoded_value = encoded_sequences[key] self.assertEqual(pytorch_value, tensorflow_value, encoded_value) def _check_no_pad_token_padding(self, tokenizer, sequences): # if tokenizer does not have pad_token_id, an error should be thrown if tokenizer.pad_token_id is None: with self.assertRaises(ValueError): if isinstance(sequences, list): tokenizer.batch_encode_plus(sequences, padding="longest") else: tokenizer.encode_plus(sequences, padding=True) # add pad_token_id to pass subsequent tests tokenizer.add_special_tokens({"pad_token": "<PAD>"}) @require_torch @slow def test_torch_encode_plus_sent_to_model(self): import torch from transformers import MODEL_MAPPING, TOKENIZER_MAPPING MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING) tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING: return config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__] config = config_class() if config.is_encoder_decoder or config.pad_token_id 
is None: return model = model_class(config) # Make sure the model contains at least the full vocabulary size in its embedding matrix is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight") if is_using_common_embeddings: self.assertGreaterEqual(model.get_input_embeddings().weight.shape[0], len(tokenizer)) # Build sequence first_ten_tokens = list(tokenizer.get_vocab().keys())[:10] sequence = " ".join(first_ten_tokens) encoded_sequence = tokenizer.encode_plus(sequence, return_tensors="pt") # Ensure that the BatchEncoding.to() method works. encoded_sequence.to(model.device) batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt") # This should not fail with torch.no_grad(): # saves some time model(**encoded_sequence) model(**batch_encoded_sequence) # if self.test_rust_tokenizer: # fast_tokenizer = self.get_rust_tokenizer() # encoded_sequence_fast = fast_tokenizer.encode_plus(sequence, return_tensors="pt") # batch_encoded_sequence_fast = fast_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt") # # This should not fail # model(**encoded_sequence_fast) # model(**batch_encoded_sequence_fast) @require_tf @slow def test_tf_encode_plus_sent_to_model(self): from transformers import TF_MODEL_MAPPING, TOKENIZER_MAPPING MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(TF_MODEL_MAPPING, TOKENIZER_MAPPING) tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING: return config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__] config = config_class() if config.is_encoder_decoder or config.pad_token_id is None: return model = model_class(config) # Make sure the model contains at least the full vocabulary size in its embedding matrix self.assertGreaterEqual(model.config.vocab_size, len(tokenizer)) # Build sequence first_ten_tokens = list(tokenizer.get_vocab().keys())[:10] sequence = " ".join(first_ten_tokens) encoded_sequence = tokenizer.encode_plus(sequence, return_tensors="tf") batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors="tf") # This should not fail model(encoded_sequence) model(batch_encoded_sequence) # TODO: Check if require_torch is the best to test for numpy here ... Maybe move to require_flax when available @require_torch @slow def test_np_encode_plus_sent_to_model(self): from transformers import MODEL_MAPPING, TOKENIZER_MAPPING MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING) tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING: return config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__] config = config_class() if config.is_encoder_decoder or config.pad_token_id is None: return # Build sequence first_ten_tokens = list(tokenizer.get_vocab().keys())[:10] sequence = " ".join(first_ten_tokens) encoded_sequence = tokenizer.encode_plus(sequence, return_tensors="np") batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors="np") # TODO: add forward through JAX/Flax when PR is merged # This is currently here to make ruff happy ! 
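                # Note: the checks below only consume the encoded variables so the linter does not flag
                # them as unused; encode_plus / batch_encode_plus return a BatchEncoding, never None,
                # so these errors are not expected to be raised in practice.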
if encoded_sequence is None: raise ValueError("Cannot convert list to numpy tensor on encode_plus()") if batch_encoded_sequence is None: raise ValueError("Cannot convert list to numpy tensor on batch_encode_plus()") if self.test_rust_tokenizer: fast_tokenizer = self.get_rust_tokenizer() encoded_sequence_fast = fast_tokenizer.encode_plus(sequence, return_tensors="np") batch_encoded_sequence_fast = fast_tokenizer.batch_encode_plus( [sequence, sequence], return_tensors="np" ) # TODO: add forward through JAX/Flax when PR is merged # This is currently here to make ruff happy ! if encoded_sequence_fast is None: raise ValueError("Cannot convert list to numpy tensor on encode_plus() (fast)") if batch_encoded_sequence_fast is None: raise ValueError("Cannot convert list to numpy tensor on batch_encode_plus() (fast)") @require_torch def test_prepare_seq2seq_batch(self): if not self.test_seq2seq: return tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Longer text that will definitely require truncation. src_text = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for" " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons" " will only worsen the violence and misery for millions of people.", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al" ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] try: batch = tokenizer.prepare_seq2seq_batch( src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors="pt", src_lang="en_XX", # this should be ignored (for all but mbart) but not cause an error ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.labels.shape[1], 10) # max_target_length will default to max_length if not specified batch = tokenizer.prepare_seq2seq_batch( src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt" ) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.labels.shape[1], 3) batch_encoder_only = tokenizer.prepare_seq2seq_batch( src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt" ) self.assertEqual(batch_encoder_only.input_ids.shape[1], 3) self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3) self.assertNotIn("decoder_input_ids", batch_encoder_only) def test_is_fast(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Check is_fast is set correctly self.assertTrue(tokenizer_r.is_fast) if self.test_slow_tokenizer: tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) self.assertFalse(tokenizer_p.is_fast) def test_fast_only_inputs(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Ensure None raise an error self.assertRaises(TypeError, tokenizer_r.tokenize, None) self.assertRaises(TypeError, 
tokenizer_r.encode, None) self.assertRaises(TypeError, tokenizer_r.encode_plus, None) self.assertRaises(TypeError, tokenizer_r.batch_encode_plus, None) def test_alignement_methods(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"] text = " ".join(words) batch_size = 3 encoding = tokenizer_r.encode_plus(text, add_special_tokens=False) batch_encoding = tokenizer_r.batch_encode_plus([text] * batch_size, add_special_tokens=False) num_tokens = len(encoding["input_ids"]) last_word_index = len(words) - 1 last_token_index = num_tokens - 1 last_batch_index = batch_size - 1 last_char_index = len(text) - 1 # words, tokens self.assertEqual(len(encoding.words(0)), num_tokens) self.assertEqual(max(encoding.words(0)), last_word_index) self.assertEqual(min(encoding.words(0)), 0) self.assertEqual(len(batch_encoding.words(last_batch_index)), num_tokens) self.assertEqual(max(batch_encoding.words(last_batch_index)), last_word_index) self.assertEqual(min(batch_encoding.words(last_batch_index)), 0) self.assertEqual(len(encoding.tokens(0)), num_tokens) # Assert token_to_word self.assertEqual(encoding.token_to_word(0), 0) self.assertEqual(encoding.token_to_word(0, 0), 0) self.assertEqual(encoding.token_to_word(last_token_index), last_word_index) self.assertEqual(encoding.token_to_word(0, last_token_index), last_word_index) self.assertEqual(batch_encoding.token_to_word(1, 0), 0) self.assertEqual(batch_encoding.token_to_word(0, last_token_index), last_word_index) self.assertEqual(batch_encoding.token_to_word(last_batch_index, last_token_index), last_word_index) # Assert word_to_tokens self.assertEqual(encoding.word_to_tokens(0).start, 0) self.assertEqual(encoding.word_to_tokens(0, 0).start, 0) self.assertEqual(encoding.word_to_tokens(last_word_index).end, last_token_index + 1) self.assertEqual(encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1) self.assertEqual(batch_encoding.word_to_tokens(1, 0).start, 0) self.assertEqual(batch_encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1) self.assertEqual( batch_encoding.word_to_tokens(last_batch_index, last_word_index).end, last_token_index + 1 ) # Assert token_to_chars self.assertEqual(encoding.token_to_chars(0).start, 0) self.assertEqual(encoding.token_to_chars(0, 0).start, 0) self.assertEqual(encoding.token_to_chars(last_token_index).end, last_char_index + 1) self.assertEqual(encoding.token_to_chars(0, last_token_index).end, last_char_index + 1) self.assertEqual(batch_encoding.token_to_chars(1, 0).start, 0) self.assertEqual(batch_encoding.token_to_chars(0, last_token_index).end, last_char_index + 1) self.assertEqual( batch_encoding.token_to_chars(last_batch_index, last_token_index).end, last_char_index + 1 ) # Assert char_to_token self.assertEqual(encoding.char_to_token(0), 0) self.assertEqual(encoding.char_to_token(0, 0), 0) self.assertEqual(encoding.char_to_token(last_char_index), last_token_index) self.assertEqual(encoding.char_to_token(0, last_char_index), last_token_index) self.assertEqual(batch_encoding.char_to_token(1, 0), 0) self.assertEqual(batch_encoding.char_to_token(0, last_char_index), last_token_index) self.assertEqual(batch_encoding.char_to_token(last_batch_index, last_char_index), last_token_index) # Assert char_to_word self.assertEqual(encoding.char_to_word(0), 0) 
self.assertEqual(encoding.char_to_word(0, 0), 0) self.assertEqual(encoding.char_to_word(last_char_index), last_word_index) self.assertEqual(encoding.char_to_word(0, last_char_index), last_word_index) self.assertEqual(batch_encoding.char_to_word(1, 0), 0) self.assertEqual(batch_encoding.char_to_word(0, last_char_index), last_word_index) self.assertEqual(batch_encoding.char_to_word(last_batch_index, last_char_index), last_word_index) # Assert word_to_chars self.assertEqual(encoding.word_to_chars(0).start, 0) self.assertEqual(encoding.word_to_chars(0, 0).start, 0) self.assertEqual(encoding.word_to_chars(last_word_index).end, last_char_index + 1) self.assertEqual(encoding.word_to_chars(0, last_word_index).end, last_char_index + 1) self.assertEqual(batch_encoding.word_to_chars(1, 0).start, 0) self.assertEqual(batch_encoding.word_to_chars(0, last_word_index).end, last_char_index + 1) self.assertEqual( batch_encoding.word_to_chars(last_batch_index, last_word_index).end, last_char_index + 1 ) # Assert token_to_sequence self.assertEqual(encoding.token_to_sequence(num_tokens // 2), 0) self.assertEqual(encoding.token_to_sequence(0, num_tokens // 2), 0) self.assertEqual(batch_encoding.token_to_sequence(1, num_tokens // 2), 0) self.assertEqual(batch_encoding.token_to_sequence(0, num_tokens // 2), 0) self.assertEqual(batch_encoding.token_to_sequence(last_batch_index, num_tokens // 2), 0) # Pair of input sequences words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"] text = " ".join(words) pair_words = ["Amazing", "example", "full", "of", "inspiration"] pair_text = " ".join(pair_words) batch_size = 3 index_word_in_first_seq = words.index("inspiration") index_word_in_pair_seq = pair_words.index("inspiration") index_char_in_first_seq = text.find("inspiration") index_char_in_pair_seq = pair_text.find("inspiration") pair_encoding = tokenizer_r.encode_plus(text, pair_text, add_special_tokens=False) pair_batch_encoding = tokenizer_r.batch_encode_plus( [(text, pair_text)] * batch_size, add_special_tokens=False ) num_tokens = len(encoding["input_ids"]) last_word_index = len(words) - 1 last_token_index = num_tokens - 1 last_batch_index = batch_size - 1 last_char_index = len(text) - 1 # Assert word_to_tokens self.assertNotEqual( pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start, pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start, ) self.assertEqual( pair_encoding["input_ids"][ pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start ], pair_encoding["input_ids"][ pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start ], ) self.assertNotEqual( pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start, pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start, ) self.assertEqual( pair_batch_encoding["input_ids"][1][ pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start ], pair_batch_encoding["input_ids"][1][ pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start ], ) # Assert char_to_token self.assertNotEqual( pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0), pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1), ) self.assertEqual( pair_encoding["input_ids"][pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0)], pair_encoding["input_ids"][pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1)], ) 
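                # Repeat the char_to_token checks on the batched pair encoding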
self.assertNotEqual( pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0), pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1), ) self.assertEqual( pair_batch_encoding["input_ids"][1][ pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0) ], pair_batch_encoding["input_ids"][1][ pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1) ], ) # Assert char_to_word self.assertNotEqual( pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0), pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1), ) self.assertEqual( words[pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0)], pair_words[pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1)], ) self.assertNotEqual( pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0), pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1), ) self.assertEqual( words[pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0)], pair_words[pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1)], ) # Assert word_to_chars self.assertNotEqual( pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start, pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start, ) self.assertEqual( text[pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start], pair_text[pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start], ) self.assertNotEqual( pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start, pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start, ) self.assertEqual( text[pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start], pair_text[pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start], ) # Assert token_to_sequence pair_encoding = tokenizer_r.encode_plus(text, pair_text, add_special_tokens=True) pair_sequence_ids = [ pair_encoding.token_to_sequence(i) for i in range(len(pair_encoding["input_ids"])) ] self.assertIn(0, pair_sequence_ids) self.assertIn(1, pair_sequence_ids) if tokenizer_r.num_special_tokens_to_add(pair=True): self.assertIn(None, pair_sequence_ids) pair_batch_encoding = tokenizer_r.batch_encode_plus( [(text, pair_text)] * batch_size, add_special_tokens=True ) pair_batch_sequence_ids = [ pair_batch_encoding.token_to_sequence(1, i) for i in range(len(pair_batch_encoding["input_ids"][0])) ] self.assertIn(0, pair_batch_sequence_ids) self.assertIn(1, pair_batch_sequence_ids) if tokenizer_r.num_special_tokens_to_add(pair=True): self.assertIn(None, pair_batch_sequence_ids) def test_tokenization_python_rust_equals(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Ensure basic input match input_p = tokenizer_p.encode_plus(self._data) input_r = tokenizer_r.encode_plus(self._data) for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()): self.assertSequenceEqual(input_p[key], input_r[key]) 
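                # Ensure pair inputs match as well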
input_pairs_p = tokenizer_p.encode_plus(self._data, self._data) input_pairs_r = tokenizer_r.encode_plus(self._data, self._data) for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()): self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key]) # Ensure truncation match input_p = tokenizer_p.encode_plus(self._data, max_length=512, truncation=True) input_r = tokenizer_r.encode_plus(self._data, max_length=512, truncation=True) for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()): self.assertSequenceEqual(input_p[key], input_r[key]) # Ensure truncation with stride match input_p = tokenizer_p.encode_plus( self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True ) input_r = tokenizer_r.encode_plus( self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True ) for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()): self.assertSequenceEqual(input_p[key], input_r[key][0]) def test_num_special_tokens_to_add_equal(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Check we have the same number of added_tokens for both pair and non-pair inputs. self.assertEqual( tokenizer_r.num_special_tokens_to_add(False), tokenizer_p.num_special_tokens_to_add(False) ) self.assertEqual( tokenizer_r.num_special_tokens_to_add(True), tokenizer_p.num_special_tokens_to_add(True) ) def test_max_length_equal(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Check we have the correct max_length for both pair and non-pair inputs. self.assertEqual(tokenizer_r.max_len_single_sentence, tokenizer_p.max_len_single_sentence) self.assertEqual(tokenizer_r.max_len_sentences_pair, tokenizer_p.max_len_sentences_pair) def test_special_tokens_map_equal(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Assert the set of special tokens match. 
self.assertSequenceEqual( tokenizer_p.special_tokens_map.items(), tokenizer_r.special_tokens_map.items(), ) def test_add_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) vocab_size = len(tokenizer_r) self.assertEqual(tokenizer_r.add_tokens(""), 0) self.assertEqual(tokenizer_r.add_tokens("testoken"), 1) self.assertEqual(tokenizer_r.add_tokens(["testoken1", "testtoken2"]), 2) self.assertEqual(len(tokenizer_r), vocab_size + 3) self.assertEqual(tokenizer_r.add_special_tokens({}), 0) self.assertEqual(tokenizer_r.add_special_tokens({"bos_token": "[BOS]", "eos_token": "[EOS]"}), 2) self.assertRaises( AssertionError, tokenizer_r.add_special_tokens, {"additional_special_tokens": "<testtoken1>"} ) self.assertEqual(tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken2>"]}), 1) self.assertEqual( tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken3>", "<testtoken4>"]}), 2 ) self.assertIn("<testtoken3>", tokenizer_r.special_tokens_map["additional_special_tokens"]) self.assertIsInstance(tokenizer_r.special_tokens_map["additional_special_tokens"], list) self.assertGreaterEqual(len(tokenizer_r.special_tokens_map["additional_special_tokens"]), 2) self.assertEqual(len(tokenizer_r), vocab_size + 8) def test_offsets_mapping(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) text = "Wonderful no inspiration example with subtoken" pair = "Along with an awesome pair" # No pair tokens_with_offsets = tokenizer_r.encode_plus( text, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True ) added_tokens = tokenizer_r.num_special_tokens_to_add(False) offsets = tokens_with_offsets["offset_mapping"] # Assert there is the same number of tokens and offsets self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) # Assert there is online added_tokens special_tokens self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) # Pairs tokens_with_offsets = tokenizer_r.encode_plus( text, pair, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True ) added_tokens = tokenizer_r.num_special_tokens_to_add(True) offsets = tokens_with_offsets["offset_mapping"] # Assert there is the same number of tokens and offsets self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) # Assert there is online added_tokens special_tokens self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) def test_batch_encode_dynamic_overflowing(self): """ When calling batch_encode with multiple sequence it can returns different number of overflowing encoding for each sequence: [ Sequence 1: [Encoding 1, Encoding 2], Sequence 2: [Encoding 1], Sequence 3: [Encoding 1, Encoding 2, ... 
Encoding N] ] This needs to be padded so that it can represented as a tensor """ for tokenizer, pretrained_name, kwargs in self.tokenizers_list: tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"): if is_torch_available(): returned_tensor = "pt" elif is_tf_available(): returned_tensor = "tf" elif is_flax_available(): returned_tensor = "jax" else: return if not tokenizer.pad_token or tokenizer.pad_token_id < 0: return tokens = tokenizer.encode_plus( "HuggingFace is solving NLP one commit at a time", max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) # Mono sample tokens = tokenizer.batch_encode_plus( ["HuggingFace is solving NLP one commit at a time"], max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) # Multi sample tokens = tokenizer.batch_encode_plus( ["HuggingFace is solving NLP one commit at a time", "Very tiny input"], max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) def test_compare_pretokenized_inputs(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) if hasattr(tokenizer_p, "add_prefix_space") and not tokenizer_p.add_prefix_space: continue # Too hard to test for now # Input string pretokenized_input_simple = "This is a sample input".split() pretokenized_input_pair = "This is a sample pair".split() # Test encode for pretokenized inputs output_r = tokenizer_r.encode( pretokenized_input_simple, is_split_into_words=True, add_special_tokens=False ) output_p = tokenizer_p.encode( pretokenized_input_simple, is_split_into_words=True, add_special_tokens=False ) self.assertEqual(output_p, output_r) kwargs = { "is_split_into_words": True, # "return_token_type_ids": True, # Use the defaults for each tokenizers # "return_attention_mask": True, # Use the defaults for each tokenizers "return_overflowing_tokens": False, "return_special_tokens_mask": True, "return_offsets_mapping": False, # Not implemented in python tokenizers # "add_special_tokens": False, } batch_kwargs = { "is_split_into_words": True, # "return_token_type_ids": True, # Use the defaults for each tokenizers # "return_attention_mask": True, # Use the defaults for each tokenizers "return_overflowing_tokens": False, "return_special_tokens_mask": True, "return_offsets_mapping": False, # Not implemented in python tokenizers # "add_special_tokens": False, } # Test encode_plus for pretokenized inputs output_r = tokenizer_r.encode_plus(pretokenized_input_simple, **kwargs) 
output_p = tokenizer_p.encode_plus(pretokenized_input_simple, **kwargs) for key in output_p.keys(): self.assertEqual(output_p[key], output_r[key]) # Test batch_encode_plus for pretokenized inputs input_batch = ([pretokenized_input_simple] * 2) + [pretokenized_input_simple + pretokenized_input_pair] output_r = tokenizer_r.batch_encode_plus(input_batch, **batch_kwargs) output_p = tokenizer_p.batch_encode_plus(input_batch, **batch_kwargs) for key in output_p.keys(): self.assertEqual(output_p[key], output_r[key]) # Test encode for pretokenized inputs pairs output_r = tokenizer_r.encode( pretokenized_input_simple, pretokenized_input_pair, is_split_into_words=True ) output_p = tokenizer_p.encode( pretokenized_input_simple, pretokenized_input_pair, is_split_into_words=True ) self.assertEqual(output_p, output_r) # Test encode_plus for pretokenized inputs output_r = tokenizer_r.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs) output_p = tokenizer_p.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs) for key in output_p.keys(): self.assertEqual(output_p[key], output_r[key]) # Test batch_encode_plus for pretokenized inputs input_batch_pair = ([pretokenized_input_simple, pretokenized_input_pair] * 2) + [ pretokenized_input_simple + pretokenized_input_pair, pretokenized_input_pair, ] output_r = tokenizer_r.batch_encode_plus(input_batch_pair, **batch_kwargs) output_p = tokenizer_p.batch_encode_plus(input_batch_pair, **batch_kwargs) for key in output_p.keys(): self.assertEqual(output_p[key], output_r[key]) def test_create_token_type_ids(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) input_simple = [1, 2, 3] input_pair = [1, 2, 3] # Generate output output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple) output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple) self.assertEqual(output_p, output_r) # Generate pair output output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple, input_pair) output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple, input_pair) self.assertEqual(output_p, output_r) def test_build_inputs_with_special_tokens(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) # # Input string # input_simple = tokenizer_p.tokenize("This is a sample input", add_special_tokens=False) # input_pair = tokenizer_p.tokenize("This is a sample pair", add_special_tokens=False) # # Generate output # output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple) # output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple) # self.assertEqual(output_p, output_r) # # Generate pair output # output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair) # output_p = 
tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair) # self.assertEqual(output_p, output_r) # Input tokens id input_simple = tokenizer_p.encode("This is a sample input", add_special_tokens=False) input_pair = tokenizer_p.encode("This is a sample pair", add_special_tokens=False) # Generate output output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple) output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple) self.assertEqual(output_p, output_r) # Generate pair output output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair) output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair) self.assertEqual(output_p, output_r) def test_padding(self, max_length=50): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id) pad_token_id = tokenizer_p.pad_token_id # Encode - Simple input input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, pad_to_max_length=True) input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, pad_to_max_length=True) self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, padding="max_length") input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode("This is a simple input", padding="longest") input_p = tokenizer_p.encode("This is a simple input", padding=True) self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id) # Encode - Pair input input_r = tokenizer_r.encode( "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True ) input_p = tokenizer_p.encode( "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True ) self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode( "This is a simple input", "This is a pair", max_length=max_length, padding="max_length" ) input_p = tokenizer_p.encode( "This is a simple input", "This is a pair", max_length=max_length, padding="max_length" ) self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode("This is a simple input", "This is a pair", padding=True) input_p = tokenizer_p.encode("This is a simple input", "This is a pair", padding="longest") self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id) # Encode_plus - Simple input input_r = tokenizer_r.encode_plus( "This is a simple input", max_length=max_length, pad_to_max_length=True ) input_p = tokenizer_p.encode_plus( "This is a simple input", max_length=max_length, pad_to_max_length=True ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus( "This is a simple input", max_length=max_length, padding="max_length" ) input_p = 
tokenizer_p.encode_plus( "This is a simple input", max_length=max_length, padding="max_length" ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus("This is a simple input", padding="longest") input_p = tokenizer_p.encode_plus("This is a simple input", padding=True) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) # Encode_plus - Pair input input_r = tokenizer_r.encode_plus( "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True ) input_p = tokenizer_p.encode_plus( "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus( "This is a simple input", "This is a pair", max_length=max_length, padding="max_length" ) input_p = tokenizer_p.encode_plus( "This is a simple input", "This is a pair", max_length=max_length, padding="max_length" ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus("This is a simple input", "This is a pair", padding="longest") input_p = tokenizer_p.encode_plus("This is a simple input", "This is a pair", padding=True) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) # Batch_encode_plus - Simple input input_r = tokenizer_r.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, pad_to_max_length=True, ) input_p = tokenizer_p.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, pad_to_max_length=True, ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="max_length", ) input_p = tokenizer_p.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="max_length", ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="longest", ) input_p = tokenizer_p.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding=True, ) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) input_r = tokenizer_r.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], padding="longest" ) input_p = tokenizer_p.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], padding=True ) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) # Batch_encode_plus - Pair input input_r = tokenizer_r.batch_encode_plus( [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple 
pair 1", "This is a simple pair 2"), ], max_length=max_length, truncation=True, padding="max_length", ) input_p = tokenizer_p.batch_encode_plus( [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ], max_length=max_length, truncation=True, padding="max_length", ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ], padding=True, ) input_p = tokenizer_p.batch_encode_plus( [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ], padding="longest", ) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) # Using pad on single examples after tokenization input_r = tokenizer_r.encode_plus("This is a input 1") input_r = tokenizer_r.pad(input_r) input_p = tokenizer_p.encode_plus("This is a input 1") input_p = tokenizer_p.pad(input_p) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) # Using pad on single examples after tokenization input_r = tokenizer_r.encode_plus("This is a input 1") input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length") input_p = tokenizer_p.encode_plus("This is a input 1") input_p = tokenizer_p.pad(input_p, max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) # Using pad after tokenization input_r = tokenizer_r.batch_encode_plus( ["This is a input 1", "This is a much longer input whilch should be padded"] ) input_r = tokenizer_r.pad(input_r) input_p = tokenizer_p.batch_encode_plus( ["This is a input 1", "This is a much longer input whilch should be padded"] ) input_p = tokenizer_p.pad(input_p) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) # Using pad after tokenization input_r = tokenizer_r.batch_encode_plus( ["This is a input 1", "This is a much longer input whilch should be padded"] ) input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length") input_p = tokenizer_p.batch_encode_plus( ["This is a input 1", "This is a much longer input whilch should be padded"] ) input_p = tokenizer_p.pad(input_p, max_length=max_length, padding="max_length") self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) # Test padding nested empty lists (in some use-cases, there is no any token id in the `input_ids` list). 
input_r = tokenizer_r.pad({"input_ids": [[], []]}, max_length=max_length, padding="max_length") input_p = tokenizer_p.pad({"input_ids": [[], []]}, max_length=max_length, padding="max_length") self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) def test_padding_different_model_input_name(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id) pad_token_id = tokenizer_p.pad_token_id input_r = tokenizer_r.batch_encode_plus( ["This is a input 1", "This is a much longer input whilch should be padded"] ) input_p = tokenizer_r.batch_encode_plus( ["This is a input 1", "This is a much longer input whilch should be padded"] ) # rename encoded batch to "inputs" input_r["inputs"] = input_r[tokenizer_r.model_input_names[0]] del input_r[tokenizer_r.model_input_names[0]] input_p["inputs"] = input_p[tokenizer_p.model_input_names[0]] del input_p[tokenizer_p.model_input_names[0]] # Renaming `input_ids` to `inputs` tokenizer_r.model_input_names = ["inputs"] + tokenizer_r.model_input_names[1:] tokenizer_p.model_input_names = ["inputs"] + tokenizer_p.model_input_names[1:] input_r = tokenizer_r.pad(input_r, padding="longest") input_p = tokenizer_r.pad(input_p, padding="longest") max_length = len(input_p["inputs"][0]) self.assert_batch_padded_input_match( input_r, input_p, max_length, pad_token_id, model_main_input_name="inputs" ) def test_save_pretrained(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # make sure that all ".json" files are saved in the correct format for file_path in tokenizer_r_files + tokenizer_p_files: if os.path.exists(file_path) and file_path.endswith(".json"): check_json_file_has_correct_format(file_path) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(tmpdirname2) # Save tokenizer rust, legacy_format=True tmpdirname2 = tempfile.mkdtemp() 
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it save with the same files self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) # Save tokenizer rust, legacy_format=False tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) def test_embeded_special_tokens(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = "A, <mask> AllenNLP sentence." tokens_r = tokenizer_r.encode_plus( sentence, add_special_tokens=True, ) tokens_p = tokenizer_p.encode_plus( sentence, add_special_tokens=True, ) for key in tokens_p.keys(): self.assertEqual(tokens_r[key], tokens_p[key]) if "token_type_ids" in tokens_r: self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) self.assertSequenceEqual(tokens_r, tokens_p) def test_compare_add_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False) # pair_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=True) for text in ["", " "]: # tokenize() no_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=False) with_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=True) self.assertEqual( len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add ) # encode() no_special_tokens = tokenizer_r.encode(text, add_special_tokens=False) with_special_tokens = tokenizer_r.encode(text, add_special_tokens=True) self.assertEqual( len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add ) # encode_plus() no_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=False) with_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=True) for key in no_special_tokens.keys(): self.assertEqual( len(no_special_tokens[key]), len(with_special_tokens[key]) - 
simple_num_special_tokens_to_add, ) # # batch_encode_plus no_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=False) with_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=True) for key in no_special_tokens.keys(): for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]): self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add) def test_compare_prepare_for_model(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) string_sequence = "Asserting that both tokenizers are equal" python_output = tokenizer_p.prepare_for_model( tokenizer_p.encode(string_sequence, add_special_tokens=False) ) rust_output = tokenizer_r.prepare_for_model( tokenizer_r.encode(string_sequence, add_special_tokens=False) ) for key in python_output: self.assertEqual(python_output[key], rust_output[key]) def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) r_output = tokenizer_r.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertTrue(special_token_id in r_output) if self.test_slow_tokenizer: tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in cr_output) def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) special_tokens_map["additional_special_tokens"] = ["an_additional_special_token"] tokenizer_config["additional_special_tokens"] = ["an_additional_special_token"] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the 
following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab()) self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_training_new_tokenizer(self): # This feature only exists for fast tokenizers if not self.test_rust_tokenizer: return tokenizer = self.get_rust_tokenizer() new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100) # Test we can use the new tokenizer with something not seen during training inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."]) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "This is the first sentence" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) # We check that the parameters of the tokenizer remained the same # Check we have the same number of added_tokens for both pair and non-pair inputs. self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False)) self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True)) # Check we have the correct max_length for both pair and non-pair inputs. 
self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence) self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair) # Assert the set of special tokens match as we didn't ask to change them self.assertSequenceEqual( tokenizer.all_special_tokens_extended, new_tokenizer.all_special_tokens_extended, ) self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map) def test_training_new_tokenizer_with_special_tokens_change(self): # This feature only exists for fast tokenizers if not self.test_rust_tokenizer: return tokenizer = self.get_rust_tokenizer() # Test with a special tokens map class_signature = inspect.signature(tokenizer.__class__) if "cls_token" in class_signature.parameters: new_tokenizer = tokenizer.train_new_from_iterator( SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"} ) cls_id = new_tokenizer.get_vocab()["<cls>"] self.assertEqual(new_tokenizer.cls_token, "<cls>") self.assertEqual(new_tokenizer.cls_token_id, cls_id) # Create a new mapping from the special tokens defined in the original tokenizer special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy() special_tokens_list.remove("additional_special_tokens") special_tokens_map = {} for token in special_tokens_list: # Get the private one to avoid unnecessary warnings. if getattr(tokenizer, f"_{token}") is not None: special_token = getattr(tokenizer, token) special_tokens_map[special_token] = f"{special_token}a" # Train new tokenizer new_tokenizer = tokenizer.train_new_from_iterator( SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map ) # Check the changes for token in special_tokens_list: # Get the private one to avoid unnecessary warnings. if getattr(tokenizer, f"_{token}") is None: continue special_token = getattr(tokenizer, token) if special_token in special_tokens_map: new_special_token = getattr(new_tokenizer, token) self.assertEqual(special_tokens_map[special_token], new_special_token) new_id = new_tokenizer.get_vocab()[new_special_token] self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id) # Check if the AddedToken / string format has been kept for special_token in tokenizer.all_special_tokens_extended: if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map: # The special token must appear identically in the list of the new tokenizer. self.assertTrue( special_token in new_tokenizer.all_special_tokens_extended, f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}", ) elif isinstance(special_token, AddedToken): # The special token must appear in the list of the new tokenizer as an object of type AddedToken with # the same parameters as the old AddedToken except the content that the user has requested to change. 
special_token_str = special_token.content new_special_token_str = special_tokens_map[special_token_str] find = False for candidate in new_tokenizer.all_special_tokens_extended: if ( isinstance(candidate, AddedToken) and candidate.content == new_special_token_str and candidate.lstrip == special_token.lstrip and candidate.rstrip == special_token.rstrip and candidate.normalized == special_token.normalized and candidate.single_word == special_token.single_word ): find = True break self.assertTrue( find, f"'{new_special_token_str}' doesn't appear in the list " f"'{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as " f"'{special_token}' in the list {tokenizer.all_special_tokens_extended}", ) elif special_token not in special_tokens_map: # The special token must appear identically in the list of the new tokenizer. self.assertTrue( special_token in new_tokenizer.all_special_tokens_extended, f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}", ) else: # The special token must appear in the list of the new tokenizer as an object of type string. self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended) # Test we can use the new tokenizer with something not seen during training inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."]) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "This is the first sentence" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) def test_tokenizer_mismatch_warning(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): with self.assertLogs("transformers", level="WARNING") as cm: try: if self.tokenizer_class == BertTokenizer: AlbertTokenizer.from_pretrained(pretrained_name) else: BertTokenizer.from_pretrained(pretrained_name) except EnvironmentError as e: # Some tokenizer will raised an error before reaching the logged warning because there are no # corresponding files to load error_message = str(e) except (TypeError, AttributeError): # Some tokenizers cannot be loaded into the target tokenizer at all and errors are returned, # here we just check that the warning has been logged before the error is raised pass finally: logged_msg_target = ( "The tokenizer class you load from this checkpoint is not the same type as the class " "this function is called from." ) raised_error_msg_target = "Can't load tokenizer for" self.assertTrue( cm.records[0].message.startswith(logged_msg_target) if len(cm.records) > 0 else False or raised_error_msg_target in error_message ) try: if self.rust_tokenizer_class == BertTokenizerFast: AlbertTokenizerFast.from_pretrained(pretrained_name) else: BertTokenizerFast.from_pretrained(pretrained_name) except (TypeError, AttributeError): # Some tokenizers cannot be loaded into the target tokenizer at all and errors are returned, # here we just check that the warning has been logged before the error is raised pass finally: self.assertTrue( cm.records[0].message.startswith( "The tokenizer class you load from this checkpoint is not the same type as the class" " this function is called from." 
) ) @require_torch def test_saving_tokenizer_trainer(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): with tempfile.TemporaryDirectory() as tmp_dir: # Save the fast tokenizer files in a temporary directory tokenizer_old = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs, use_fast=True) tokenizer_old.save_pretrained(tmp_dir, legacy_format=False) # save only fast version # Initialize toy model for the trainer model = nn.Module() # Load tokenizer from a folder without legacy files tokenizer = self.rust_tokenizer_class.from_pretrained(tmp_dir) training_args = TrainingArguments(output_dir=tmp_dir, do_train=True, no_cuda=True) trainer = Trainer(model=model, args=training_args, tokenizer=tokenizer) # Should not raise an error trainer.save_model(os.path.join(tmp_dir, "checkpoint")) self.assertIn("tokenizer.json", os.listdir(os.path.join(tmp_dir, "checkpoint"))) def test_convert_tokens_to_string_format(self): tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["this", "is", "a", "test"] string = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(string, str) def test_save_slow_from_fast_and_reload_fast(self): if not self.test_slow_tokenizer or not self.test_rust_tokenizer: # we need both slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): with tempfile.TemporaryDirectory() as tmp_dir_1: # Here we check that even if we have initialized a fast tokenizer with a tokenizer_file we can # still save only the slow version and use these saved files to rebuild a tokenizer tokenizer_fast_old_1 = self.rust_tokenizer_class.from_pretrained( pretrained_name, **kwargs, use_fast=True ) tokenizer_file = os.path.join(tmp_dir_1, "tokenizer.json") tokenizer_fast_old_1.backend_tokenizer.save(tokenizer_file) tokenizer_fast_old_2 = self.rust_tokenizer_class.from_pretrained( pretrained_name, **kwargs, use_fast=True, tokenizer_file=tokenizer_file ) tokenizer_fast_old_2.save_pretrained(tmp_dir_1, legacy_format=True) # save only slow version tokenizer_slow = self.tokenizer_class.from_pretrained(tmp_dir_1) with tempfile.TemporaryDirectory() as tmp_dir_2: tokenizer_slow.save_pretrained(tmp_dir_2) # Should not raise an error self.rust_tokenizer_class.from_pretrained(tmp_dir_2) def test_clean_up_tokenization_spaces(self): tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") assert tokenizer.clean_up_tokenization_spaces is True tokens = tokenizer.encode("This shouldn't be! He'll go.") decoded = tokenizer.decode(tokens) assert decoded == "[CLS] this shouldn't be! he'll go. [SEP]" tokenizer.clean_up_tokenization_spaces = False decoded = tokenizer.decode(tokens) assert decoded == "[CLS] this shouldn ' t be ! he ' ll go . [SEP]" assert decoded == tokenizer.decode(tokens, clean_up_tokenization_spaces=False) # Fast from slow with tempfile.TemporaryDirectory() as tmp_dir_2: tokenizer.save_pretrained(tmp_dir_2) tokenizer_fast = BertTokenizerFast.from_pretrained(tmp_dir_2) del tokenizer assert tokenizer_fast.clean_up_tokenization_spaces is False decoded = tokenizer_fast.decode(tokens) # fast and slow don't have the same output when we don't cleanup # tokenization space. Here `be!` vs `be !` and `go.` vs `go .` assert decoded == "[CLS] this shouldn ' t be! he ' ll go. 
[SEP]" tokenizer_fast.clean_up_tokenization_spaces = True assert tokenizer_fast.clean_up_tokenization_spaces is True decoded = tokenizer_fast.decode(tokens) assert decoded == "[CLS] this shouldn't be! he'll go. [SEP]" # Slow from fast with tempfile.TemporaryDirectory() as tmp_dir_2: tokenizer_fast.clean_up_tokenization_spaces = False tokenizer_fast.save_pretrained(tmp_dir_2) tokenizer = BertTokenizer.from_pretrained(tmp_dir_2) assert tokenizer.clean_up_tokenization_spaces is False decoded = tokenizer.decode(tokens) assert decoded == "[CLS] this shouldn ' t be ! he ' ll go . [SEP]" tokenizer.clean_up_tokenization_spaces = True decoded = tokenizer.decode(tokens) assert decoded == "[CLS] this shouldn't be! he'll go. [SEP]"
203,157
50.354398
149
py
transformers
transformers-main/tests/test_image_transforms.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from parameterized import parameterized from transformers.testing_utils import require_flax, require_tf, require_torch, require_vision from transformers.utils.import_utils import is_flax_available, is_tf_available, is_torch_available, is_vision_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax if is_vision_available(): import PIL.Image from transformers.image_transforms import ( center_crop, center_to_corners_format, convert_to_rgb, corners_to_center_format, flip_channel_order, get_resize_output_image_size, id_to_rgb, normalize, pad, resize, rgb_to_id, to_channel_dimension_format, to_pil_image, ) def get_random_image(height, width, num_channels=3, channels_first=True): shape = (num_channels, height, width) if channels_first else (height, width, num_channels) random_array = np.random.randint(0, 256, shape, dtype=np.uint8) return random_array @require_vision class ImageTransformsTester(unittest.TestCase): @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float64), ("numpy_int_channels_first", (3, 4, 5), np.int32), ("numpy_uint_channels_first", (3, 4, 5), np.uint8), ] ) @require_vision def test_to_pil_image(self, name, image_shape, dtype): image = np.random.randint(0, 256, image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # make sure image is correctly rescaled self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float64), ] ) @require_vision def test_to_pil_image_from_float(self, name, image_shape, dtype): image = np.random.rand(*image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # make sure image is correctly rescaled self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) # Make sure that an exception is raised if image is not in [0, 1] image = np.random.randn(*image_shape).astype(dtype) with self.assertRaises(ValueError): to_pil_image(image) @require_vision def test_to_pil_image_from_mask(self): # Make sure binary mask remains a binary mask image = np.random.randint(0, 2, (3, 4, 5)).astype(np.uint8) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) image = np.random.randint(0, 2, (3, 4, 5)).astype(np.float32) pil_image 
= to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) @require_tf def test_to_pil_image_from_tensorflow(self): # channels_first image = tf.random.uniform((3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channels_last image = tf.random.uniform((4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) @require_torch def test_to_pil_image_from_torch(self): # channels first image = torch.rand((3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channels last image = torch.rand((4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) @require_flax def test_to_pil_image_from_jax(self): key = jax.random.PRNGKey(0) # channel first image = jax.random.uniform(key, (3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channel last image = jax.random.uniform(key, (4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) def test_to_channel_dimension_format(self): # Test that function doesn't reorder if channel dim matches the input. image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) # Test that function reorders if channel dim doesn't match the input. image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) def test_get_resize_output_image_size(self): image = np.random.randint(0, 256, (3, 224, 224)) # Test the output size defaults to (x, x) if an int is given. self.assertEqual(get_resize_output_image_size(image, 10), (10, 10)) self.assertEqual(get_resize_output_image_size(image, [10]), (10, 10)) self.assertEqual(get_resize_output_image_size(image, (10,)), (10, 10)) # Test the output size is the same as the input if a two element tuple/list is given. 
self.assertEqual(get_resize_output_image_size(image, (10, 20)), (10, 20)) self.assertEqual(get_resize_output_image_size(image, [10, 20]), (10, 20)) self.assertEqual(get_resize_output_image_size(image, (10, 20), default_to_square=True), (10, 20)) # To match pytorch behaviour, max_size is only relevant if size is an int self.assertEqual(get_resize_output_image_size(image, (10, 20), max_size=5), (10, 20)) # Test output size = (int(size * height / width), size) if size is an int and height > width image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (25, 20)) # Test output size = (size, int(size * width / height)) if size is an int and width <= height image = np.random.randint(0, 256, (3, 40, 50)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (20, 25)) # Test size is resized if longer size > max_size image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False, max_size=22), (22, 17)) # Test correct channel dimension is returned if output size if height == 3 # Defaults to input format - channels first image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 3, 20)) # Defaults to input format - channels last image = np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20), data_format="channels_last") self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20), data_format="channels_first") self.assertEqual(resized_image.shape, (3, 3, 20)) def test_resize(self): image = np.random.randint(0, 256, (3, 224, 224)) # Check the channel order is the same by default resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) # Check channel order is changed if specified resized_image = resize(image, (30, 40), data_format="channels_last") self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (30, 40, 3)) # Check PIL.Image.Image is returned if return_numpy=False resized_image = resize(image, (30, 40), return_numpy=False) self.assertIsInstance(resized_image, PIL.Image.Image) # PIL size is in (width, height) order self.assertEqual(resized_image.size, (40, 30)) # Check an image with float values between 0-1 is returned with values in this range image = np.random.rand(3, 224, 224) resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) self.assertTrue(np.all(resized_image >= 0)) self.assertTrue(np.all(resized_image <= 1)) def test_normalize(self): image = np.random.randint(0, 256, (224, 224, 3)) / 255 # Test that exception is raised if inputs are incorrect # Not a numpy array image with self.assertRaises(ValueError): normalize(5, 5, 5) # Number of mean values != number of channels with self.assertRaises(ValueError): normalize(image, mean=(0.5, 0.6), std=1) # Number of std values != number of channels with self.assertRaises(ValueError): normalize(image, mean=1, std=(0.5, 0.6)) # Test result is correct - output data format is channels_first and normalization # correctly computed mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = 
((image - mean) / std).transpose((2, 0, 1)) normalized_image = normalize(image, mean=mean, std=std, data_format="channels_first") self.assertIsInstance(normalized_image, np.ndarray) self.assertEqual(normalized_image.shape, (3, 224, 224)) self.assertTrue(np.allclose(normalized_image, expected_image)) def test_center_crop(self): image = np.random.randint(0, 256, (3, 224, 224)) # Test that exception is raised if inputs are incorrect with self.assertRaises(ValueError): center_crop(image, 10) # Test result is correct - output data format is channels_first and center crop # correctly computed expected_image = image[:, 52:172, 82:142].transpose(1, 2, 0) cropped_image = center_crop(image, (120, 60), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (120, 60, 3)) self.assertTrue(np.allclose(cropped_image, expected_image)) # Test that image is padded with zeros if crop size is larger than image size expected_image = np.zeros((300, 260, 3)) expected_image[38:262, 18:242, :] = image.transpose((1, 2, 0)) cropped_image = center_crop(image, (300, 260), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (300, 260, 3)) self.assertTrue(np.allclose(cropped_image, expected_image)) def test_center_to_corners_format(self): bbox_center = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) expected = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) self.assertTrue(np.allclose(center_to_corners_format(bbox_center), expected)) # Check that the function and inverse function are inverse of each other self.assertTrue(np.allclose(corners_to_center_format(center_to_corners_format(bbox_center)), bbox_center)) def test_corners_to_center_format(self): bbox_corners = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) expected = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) self.assertTrue(np.allclose(corners_to_center_format(bbox_corners), expected)) # Check that the function and inverse function are inverse of each other self.assertTrue(np.allclose(center_to_corners_format(corners_to_center_format(bbox_corners)), bbox_corners)) def test_rgb_to_id(self): # test list input rgb = [125, 4, 255] self.assertEqual(rgb_to_id(rgb), 16712829) # test numpy array input color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) expected = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) self.assertTrue(np.allclose(rgb_to_id(color), expected)) def test_id_to_rgb(self): # test int input self.assertEqual(id_to_rgb(16712829), [125, 4, 255]) # test array input id_array = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) self.assertTrue(np.allclose(id_to_rgb(id_array), color)) def test_pad(self): # fmt: off image = np.array([[ [0, 1], [2, 3], ]]) # fmt: on # Test that exception is raised if unknown padding mode is specified with self.assertRaises(ValueError): pad(image, 10, mode="unknown") # Test that exception is raised if invalid padding is specified with self.assertRaises(ValueError): # Cannot pad on channel dimension pad(image, (5, 10, 10)) # Test image is padded equally on all sides is padding is an int # fmt: off expected_image = np.array([ [[0, 0, 0, 0], [0, 0, 1, 0], [0, 2, 3, 0], [0, 0, 0, 0]], ]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, 1))) # Test the 
left and right of each axis is padded (pad_left, pad_right) # fmt: off expected_image = np.array( [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 2, 3, 0], [0, 0, 0, 0, 0]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, (2, 1)))) # Test only one axis is padded (pad_left, pad_right) # fmt: off expected_image = np.array([[ [9, 9], [9, 9], [0, 1], [2, 3], [9, 9] ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((2, 1), (0, 0)), constant_values=9))) # Test padding with a constant value # fmt: off expected_image = np.array([[ [8, 8, 0, 1, 9], [8, 8, 2, 3, 9], [8, 8, 7, 7, 9], [8, 8, 7, 7, 9] ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), constant_values=((6, 7), (8, 9))))) # fmt: off image = np.array([[ [0, 1, 2], [3, 4, 5], [6, 7, 8], ]]) # fmt: on # Test padding with PaddingMode.REFLECT # fmt: off expected_image = np.array([[ [2, 1, 0, 1, 2, 1], [5, 4, 3, 4, 5, 4], [8, 7, 6, 7, 8, 7], [5, 4, 3, 4, 5, 4], [2, 1, 0, 1, 2, 1], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect"))) # Test padding with PaddingMode.REPLICATE # fmt: off expected_image = np.array([[ [0, 0, 0, 1, 2, 2], [3, 3, 3, 4, 5, 5], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="replicate"))) # Test padding with PaddingMode.SYMMETRIC # fmt: off expected_image = np.array([[ [1, 0, 0, 1, 2, 2], [4, 3, 3, 4, 5, 5], [7, 6, 6, 7, 8, 8], [7, 6, 6, 7, 8, 8], [4, 3, 3, 4, 5, 5], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="symmetric"))) # Test we can specify the output data format # Test padding with PaddingMode.REFLECT # fmt: off image = np.array([[ [0, 1], [2, 3], ]]) expected_image = np.array([ [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]], [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]] ]) # fmt: on self.assertTrue( np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect", data_format="channels_last")) ) @require_vision def test_convert_to_rgb(self): # Test that an RGBA image is converted to RGB image = np.array([[[1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "RGBA") self.assertEqual(pil_image.size, (2, 1)) # For the moment, numpy images are returned as is rgb_image = convert_to_rgb(image) self.assertEqual(rgb_image.shape, (1, 2, 4)) self.assertTrue(np.allclose(rgb_image, image)) # And PIL images are converted rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[1, 2, 3], [5, 6, 7]]], dtype=np.uint8))) # Test that a grayscale image is converted to RGB image = np.array([[0, 255]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "L") self.assertEqual(pil_image.size, (2, 1)) rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[0, 0, 0], [255, 255, 255]]], dtype=np.uint8))) def test_flip_channel_order(self): # fmt: off img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[16, 17, 18, 19], [20, 21, 22, 23]], ]) # fmt: on img_channels_last = np.moveaxis(img_channels_first, 0, -1) # fmt: off 
flipped_img_channels_first = np.array([ [[16, 17, 18, 19], [20, 21, 22, 23]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], ]) # fmt: on flipped_img_channels_last = np.moveaxis(flipped_img_channels_first, 0, -1) self.assertTrue(np.allclose(flip_channel_order(img_channels_first), flipped_img_channels_first)) self.assertTrue( np.allclose(flip_channel_order(img_channels_first, "channels_last"), flipped_img_channels_last) ) self.assertTrue(np.allclose(flip_channel_order(img_channels_last), flipped_img_channels_last)) self.assertTrue( np.allclose(flip_channel_order(img_channels_last, "channels_first"), flipped_img_channels_first) )
21,561
37.366548
119
py
transformers
transformers-main/tests/test_modeling_flax_utils.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 @require_flax @is_staging_test class FlaxModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-model-flax") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) model.push_to_hub("test-model-flax", use_auth_token=self._token) new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # Reset repo delete_repo(token=self._token, repo_id="test-model-flax") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token) new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, 
use_auth_token=self._token ) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def check_models_equal(model1, model2): models_are_equal = True flat_params_1 = flatten_dict(model1.params) flat_params_2 = flatten_dict(model2.params) for key in flat_params_1.keys(): if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4: models_are_equal = False return models_are_equal @require_flax class FlaxModelUtilsTest(unittest.TestCase): def test_model_from_pretrained_subfolder(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = FlaxBertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder)) with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(tmp_dir) model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_subfolder_sharded(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = FlaxBertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB") with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(tmp_dir) model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_hub_subfolder(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(model_id) model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_hub_subfolder_sharded(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(model_id) model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model)
6,855
35.663102
115
py
transformers
transformers-main/tests/test_feature_extraction_utils.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures") class FeatureExtractorUtilTester(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") # This check we did call the fake head request mock_head.assert_called() def test_legacy_load_from_url(self): # This test is for deprecated behavior and can be removed in v5 _ = Wav2Vec2FeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class FeatureExtractorPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-feature-extractor") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor") except HTTPError: pass def test_push_to_hub(self): feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) # Reset repo delete_repo(token=self._token, repo_id="test-feature-extractor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token ) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor") for k, v in 
feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) def test_push_to_hub_in_organization(self): feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token ) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) def test_push_to_hub_dynamic_feature_extractor(self): CustomFeatureExtractor.register_for_auto_class() feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map, {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"}, ) new_feature_extractor = AutoFeatureExtractor.from_pretrained( f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
6,160
41.489655
135
py
transformers
transformers-main/tests/test_modeling_tf_utils.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import inspect import json import os import random import tempfile import unittest import unittest.mock as mock from huggingface_hub import HfFolder, Repository, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import is_tf_available, is_torch_available from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import ( # noqa: F401 TOKEN, USER, CaptureLogger, _tf_gpu_memory_limit, is_pt_tf_cross_test, is_staging_test, require_safetensors, require_tf, slow, ) from transformers.utils import SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, logging logger = logging.get_logger(__name__) if is_tf_available(): import h5py import numpy as np import tensorflow as tf from transformers import ( BertConfig, PreTrainedModel, PushToHubCallback, RagRetriever, TFBertForMaskedLM, TFBertForSequenceClassification, TFBertModel, TFPreTrainedModel, TFRagModel, ) from transformers.modeling_tf_utils import tf_shard_checkpoint, unpack_inputs from transformers.tf_utils import stable_softmax tf.config.experimental.enable_tensor_float_32_execution(False) if _tf_gpu_memory_limit is not None: gpus = tf.config.list_physical_devices("GPU") for gpu in gpus: # Restrict TensorFlow to only allocate x GB of memory on the GPUs try: tf.config.set_logical_device_configuration( gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)] ) logical_gpus = tf.config.list_logical_devices("GPU") print("Logical GPUs", logical_gpus) except RuntimeError as e: # Virtual devices must be set before GPUs have been initialized print(e) if is_torch_available(): from transformers import BertModel @require_tf class TFModelUtilsTest(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. 
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() def test_load_from_one_file(self): try: tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/tf_model.h5", f) config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") _ = TFBertModel.from_pretrained(tmp_file, config=config) finally: os.remove(tmp_file) def test_legacy_load_from_url(self): # This test is for deprecated behavior and can be removed in v5 config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") _ = TFBertModel.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/tf_model.h5", config=config ) # tests whether the unpack_inputs function behaves as expected def test_unpack_inputs(self): class DummyModel: def __init__(self): config_kwargs = {"output_attentions": False, "output_hidden_states": False, "return_dict": False} self.config = PretrainedConfig(**config_kwargs) self.main_input_name = "input_ids" @unpack_inputs def call( self, input_ids=None, past_key_values=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return input_ids, past_key_values, output_attentions, output_hidden_states, return_dict @unpack_inputs def foo(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None): return pixel_values, output_attentions, output_hidden_states, return_dict dummy_model = DummyModel() input_ids = tf.constant([0, 1, 2, 3], dtype=tf.int32) past_key_values = tf.constant([4, 5, 6, 7], dtype=tf.int32) pixel_values = tf.constant([8, 9, 10, 11], dtype=tf.int32) # test case 1: Pass inputs as keyword arguments; Booleans are inherited from the config. output = dummy_model.call(input_ids=input_ids, past_key_values=past_key_values) tf.debugging.assert_equal(output[0], input_ids) tf.debugging.assert_equal(output[1], past_key_values) self.assertFalse(output[2]) self.assertFalse(output[3]) self.assertFalse(output[4]) # test case 2: Same as above, but with positional arguments. output = dummy_model.call(input_ids, past_key_values) tf.debugging.assert_equal(output[0], input_ids) tf.debugging.assert_equal(output[1], past_key_values) self.assertFalse(output[2]) self.assertFalse(output[3]) self.assertFalse(output[4]) # test case 3: We can also pack everything in the first input. output = dummy_model.call(input_ids={"input_ids": input_ids, "past_key_values": past_key_values}) tf.debugging.assert_equal(output[0], input_ids) tf.debugging.assert_equal(output[1], past_key_values) self.assertFalse(output[2]) self.assertFalse(output[3]) self.assertFalse(output[4]) # test case 4: Explicit boolean arguments should override the config. output = dummy_model.call( input_ids=input_ids, past_key_values=past_key_values, output_attentions=False, return_dict=True ) tf.debugging.assert_equal(output[0], input_ids) tf.debugging.assert_equal(output[1], past_key_values) self.assertFalse(output[2]) self.assertFalse(output[3]) self.assertTrue(output[4]) # test case 5: Unexpected arguments should raise an exception. with self.assertRaises(ValueError): output = dummy_model.call(input_ids=input_ids, past_key_values=past_key_values, foo="bar") # test case 6: the decorator is independent from `main_input_name` -- it treats the first argument of the # decorated function as its main input. 
output = dummy_model.foo(pixel_values=pixel_values) tf.debugging.assert_equal(output[0], pixel_values) self.assertFalse(output[1]) self.assertFalse(output[2]) self.assertFalse(output[3]) # Tests whether the stable softmax is stable on CPU, with and without XLA def test_xla_stable_softmax(self): large_penalty = -1e9 n_tokens = 10 batch_size = 8 def masked_softmax(x, boolean_mask): numerical_mask = (1.0 - tf.cast(boolean_mask, dtype=tf.float32)) * large_penalty masked_x = x + numerical_mask return stable_softmax(masked_x) xla_masked_softmax = tf.function(masked_softmax, jit_compile=True) xla_stable_softmax = tf.function(stable_softmax, jit_compile=True) x = tf.random.normal((batch_size, n_tokens)) # Same outcome regardless of the boolean mask here masked_tokens = random.randint(0, n_tokens) boolean_mask = tf.convert_to_tensor([[1] * (n_tokens - masked_tokens) + [0] * masked_tokens], dtype=tf.int32) # We can randomly mask a random numerical input OUTSIDE XLA numerical_mask = (1.0 - tf.cast(boolean_mask, dtype=tf.float32)) * large_penalty masked_x = x + numerical_mask xla_out = xla_stable_softmax(masked_x) out = stable_softmax(masked_x) assert tf.experimental.numpy.allclose(xla_out, out) # The stable softmax has the same output as the original softmax unstable_out = tf.nn.softmax(masked_x) assert tf.experimental.numpy.allclose(unstable_out, out) # We can randomly mask a random numerical input INSIDE XLA xla_out = xla_masked_softmax(x, boolean_mask) out = masked_softmax(x, boolean_mask) assert tf.experimental.numpy.allclose(xla_out, out) def test_checkpoint_sharding_from_hub(self): model = TFBertModel.from_pretrained("ArthurZ/tiny-random-bert-sharded") # the model above is the same as the model below, just a sharded version. ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.weights, ref_model.weights): assert np.allclose(p1.numpy(), p2.numpy()) def test_sharded_checkpoint_with_prefix(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", load_weight_prefix="a/b") sharded_model = TFBertModel.from_pretrained("ArthurZ/tiny-random-bert-sharded", load_weight_prefix="a/b") for p1, p2 in zip(model.weights, sharded_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) self.assertTrue(p1.name.startswith("a/b/")) self.assertTrue(p2.name.startswith("a/b/")) def test_sharded_checkpoint_transfer(self): # If this doesn't throw an error then the test passes TFBertForSequenceClassification.from_pretrained("ArthurZ/tiny-random-bert-sharded") @is_pt_tf_cross_test def test_checkpoint_sharding_local_from_pt(self): with tempfile.TemporaryDirectory() as tmp_dir: _ = Repository(local_dir=tmp_dir, clone_from="hf-internal-testing/tiny-random-bert-sharded") model = TFBertModel.from_pretrained(tmp_dir, from_pt=True) # the model above is the same as the model below, just a sharded pytorch version. 
ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.weights, ref_model.weights): assert np.allclose(p1.numpy(), p2.numpy()) @is_pt_tf_cross_test def test_checkpoint_loading_with_prefix_from_pt(self): model = TFBertModel.from_pretrained( "hf-internal-testing/tiny-random-bert", from_pt=True, load_weight_prefix="a/b" ) ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", from_pt=True) for p1, p2 in zip(model.weights, ref_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) self.assertTrue(p1.name.startswith("a/b/")) @is_pt_tf_cross_test def test_checkpoint_sharding_hub_from_pt(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True) # the model above is the same as the model below, just a sharded pytorch version. ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.weights, ref_model.weights): assert np.allclose(p1.numpy(), p2.numpy()) def test_shard_checkpoint(self): # This is the model we will use, total size 340,000 bytes. model = tf.keras.Sequential( [ tf.keras.layers.Dense(200, use_bias=False), # size 80,000 tf.keras.layers.Dense(200, use_bias=False), # size 160,000 tf.keras.layers.Dense(100, use_bias=False), # size 80,000 tf.keras.layers.Dense(50, use_bias=False), # size 20,000 ] ) inputs = tf.zeros((1, 100), dtype=tf.float32) model(inputs) weights = model.weights weights_dict = {w.name: w for w in weights} with self.subTest("No shard when max size is bigger than model size"): shards, index = tf_shard_checkpoint(weights) self.assertIsNone(index) self.assertDictEqual(shards, {TF2_WEIGHTS_NAME: weights}) with self.subTest("Test sharding, no weights bigger than max size"): shards, index = tf_shard_checkpoint(weights, max_shard_size="300kB") # Split is first two layers then last two. self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "dense/kernel:0": "tf_model-00001-of-00002.h5", "dense_1/kernel:0": "tf_model-00001-of-00002.h5", "dense_2/kernel:0": "tf_model-00002-of-00002.h5", "dense_3/kernel:0": "tf_model-00002-of-00002.h5", }, }, ) shard1 = [weights_dict["dense/kernel:0"], weights_dict["dense_1/kernel:0"]] shard2 = [weights_dict["dense_2/kernel:0"], weights_dict["dense_3/kernel:0"]] self.assertDictEqual(shards, {"tf_model-00001-of-00002.h5": shard1, "tf_model-00002-of-00002.h5": shard2}) with self.subTest("Test sharding with weights bigger than max size"): shards, index = tf_shard_checkpoint(weights, max_shard_size="100kB") # Split is first layer, second layer then last 2. 
self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "dense/kernel:0": "tf_model-00001-of-00003.h5", "dense_1/kernel:0": "tf_model-00002-of-00003.h5", "dense_2/kernel:0": "tf_model-00003-of-00003.h5", "dense_3/kernel:0": "tf_model-00003-of-00003.h5", }, }, ) shard1 = [weights_dict["dense/kernel:0"]] shard2 = [weights_dict["dense_1/kernel:0"]] shard3 = [weights_dict["dense_2/kernel:0"], weights_dict["dense_3/kernel:0"]] self.assertDictEqual( shards, { "tf_model-00001-of-00003.h5": shard1, "tf_model-00002-of-00003.h5": shard2, "tf_model-00003-of-00003.h5": shard3, }, ) @slow def test_special_layer_name_sharding(self): retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True) model = TFRagModel.from_pretrained("facebook/rag-token-nq", retriever=retriever) with tempfile.TemporaryDirectory() as tmp_dir: for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) ref_model = TFRagModel.from_pretrained(tmp_dir, retriever=retriever) for p1, p2 in zip(model.weights, ref_model.weights): assert np.allclose(p1.numpy(), p2.numpy()) def test_checkpoint_sharding_local(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".h5"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, TF2_WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_NAME))) # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: with h5py.File(shard_file, "r") as state_file: self.assertEqual(len(state_file), 1) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".h5")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = TFBertModel.from_pretrained(tmp_dir) model.build() new_model.build() for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @slow def test_save_pretrained_signatures(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Short custom TF signature function. # `input_signature` is specific to BERT. 
@tf.function( input_signature=[ [ tf.TensorSpec([None, None], tf.int32, name="input_ids"), tf.TensorSpec([None, None], tf.int32, name="token_type_ids"), tf.TensorSpec([None, None], tf.int32, name="attention_mask"), ] ] ) def serving_fn(input): return model(input) # Using default signature (default behavior) overrides 'serving_default' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, saved_model=True, signatures=None) model_loaded = tf.keras.models.load_model(f"{tmp_dir}/saved_model/1") self.assertTrue("serving_default" in list(model_loaded.signatures.keys())) # Providing custom signature function with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, saved_model=True, signatures={"custom_signature": serving_fn}) model_loaded = tf.keras.models.load_model(f"{tmp_dir}/saved_model/1") self.assertTrue("custom_signature" in list(model_loaded.signatures.keys())) # Providing multiple custom signature function with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( tmp_dir, saved_model=True, signatures={"custom_signature_1": serving_fn, "custom_signature_2": serving_fn}, ) model_loaded = tf.keras.models.load_model(f"{tmp_dir}/saved_model/1") self.assertTrue("custom_signature_1" in list(model_loaded.signatures.keys())) self.assertTrue("custom_signature_2" in list(model_loaded.signatures.keys())) @require_safetensors def test_safetensors_save_and_load(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) # No tf_model.h5 file, only a model.safetensors self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_NAME))) new_model = TFBertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @is_pt_tf_cross_test def test_safetensors_save_and_load_pt_to_tf(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") pt_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: pt_model.save_pretrained(tmp_dir, safe_serialization=True) # Check we have a model.safetensors file self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) new_model = TFBertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_load_from_hub(self): tf_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Can load from the TF-formatted checkpoint safetensors_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors-tf") # Check models are equal for p1, p2 in zip(safetensors_model.weights, tf_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) # Can load from the PyTorch-formatted checkpoint safetensors_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors") # Check models are equal for p1, p2 in zip(safetensors_model.weights, tf_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_tf @is_staging_test class TFModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: 
delete_repo(token=cls._token, repo_id="test-model-tf") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-model-tf-callback") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-model-tf-org") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = TFBertModel(config) # Make sure model is properly initialized model.build() logging.set_verbosity_info() logger = logging.get_logger("transformers.utils.hub") with CaptureLogger(logger) as cl: model.push_to_hub("test-model-tf", use_auth_token=self._token) logging.set_verbosity_warning() # Check the model card was created and uploaded. self.assertIn("Uploading the following files to __DUMMY_TRANSFORMERS_USER__/test-model-tf", cl.out) new_model = TFBertModel.from_pretrained(f"{USER}/test-model-tf") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal) # Reset repo delete_repo(token=self._token, repo_id="test-model-tf") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id="test-model-tf", push_to_hub=True, use_auth_token=self._token) new_model = TFBertModel.from_pretrained(f"{USER}/test-model-tf") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal) @is_pt_tf_cross_test def test_push_to_hub_callback(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = TFBertForMaskedLM(config) model.compile() with tempfile.TemporaryDirectory() as tmp_dir: push_to_hub_callback = PushToHubCallback( output_dir=tmp_dir, hub_model_id="test-model-tf-callback", hub_token=self._token, ) model.fit(model.dummy_inputs, model.dummy_inputs, epochs=1, callbacks=[push_to_hub_callback]) new_model = TFBertForMaskedLM.from_pretrained(f"{USER}/test-model-tf-callback") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal) tf_push_to_hub_params = dict(inspect.signature(TFPreTrainedModel.push_to_hub).parameters) tf_push_to_hub_params.pop("base_model_card_args") pt_push_to_hub_params = dict(inspect.signature(PreTrainedModel.push_to_hub).parameters) pt_push_to_hub_params.pop("deprecated_kwargs") self.assertDictEaual(tf_push_to_hub_params, pt_push_to_hub_params) def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = TFBertModel(config) # Make sure model is properly initialized model.build() model.push_to_hub("valid_org/test-model-tf-org", use_auth_token=self._token) new_model = TFBertModel.from_pretrained("valid_org/test-model-tf-org") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-model-tf-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( tmp_dir, push_to_hub=True, use_auth_token=self._token, repo_id="valid_org/test-model-tf-org" ) new_model = 
TFBertModel.from_pretrained("valid_org/test-model-tf-org") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal)
27,861
43.366242
118
py
transformers
transformers-main/tests/test_backbone_common.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import inspect

from transformers.testing_utils import require_torch, torch_device
from transformers.utils.backbone_utils import BackboneType


@require_torch
class BackboneTesterMixin:
    all_model_classes = ()
    has_attentions = True

    def test_config(self):
        config_class = self.config_class

        # test default config
        config = config_class()
        self.assertIsNotNone(config)

        expected_stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(config.depths) + 1)]
        self.assertEqual(config.stage_names, expected_stage_names)
        self.assertTrue(set(config.out_features).issubset(set(config.stage_names)))

        # Test out_features and out_indices are correctly set
        # out_features and out_indices both None
        config = config_class(out_features=None, out_indices=None)
        self.assertEqual(config.out_features, [config.stage_names[-1]])
        self.assertEqual(config.out_indices, [len(config.stage_names) - 1])

        # out_features and out_indices both set
        config = config_class(out_features=["stem", "stage1"], out_indices=[0, 1])
        self.assertEqual(config.out_features, ["stem", "stage1"])
        self.assertEqual(config.out_indices, [0, 1])

        # Only out_features set
        config = config_class(out_features=["stage1", "stage3"])
        self.assertEqual(config.out_features, ["stage1", "stage3"])
        self.assertEqual(config.out_indices, [1, 3])

        # Only out_indices set
        config = config_class(out_indices=[0, 2])
        self.assertEqual(config.out_features, [config.stage_names[0], config.stage_names[2]])
        self.assertEqual(config.out_indices, [0, 2])

        # Error raised when out_indices do not correspond to out_features
        with self.assertRaises(ValueError):
            config = config_class(out_features=["stage1", "stage2"], out_indices=[0, 2])

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_channels(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertEqual(len(model.channels), len(config.out_features))
            num_features = model.num_features
            out_indices = [config.stage_names.index(feat) for feat in config.out_features]
            out_channels = [num_features[idx] for idx in out_indices]
            self.assertListEqual(model.channels, out_channels)

            new_config = copy.deepcopy(config)
            new_config.out_features = None
            model = model_class(new_config)
            self.assertEqual(len(model.channels), 1)
            self.assertListEqual(model.channels, [num_features[-1]])

            new_config = copy.deepcopy(config)
            new_config.out_indices = None
            model = model_class(new_config)
            self.assertEqual(len(model.channels), 1)
            self.assertListEqual(model.channels, [num_features[-1]])

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_features))
            self.assertEqual(len(model.channels), len(config.out_features))
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_features = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

    def test_backbone_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)

            self.assertTrue(hasattr(backbone, "backbone_type"))
            self.assertTrue(hasattr(backbone, "stage_names"))
            self.assertTrue(hasattr(backbone, "num_features"))
            self.assertTrue(hasattr(backbone, "out_indices"))
            self.assertTrue(hasattr(backbone, "out_features"))
            self.assertTrue(hasattr(backbone, "out_feature_channels"))
            self.assertTrue(hasattr(backbone, "channels"))

            self.assertIsInstance(backbone.backbone_type, BackboneType)

            # Verify num_features has been initialized in the backbone init
            self.assertIsNotNone(backbone.num_features)
            self.assertTrue(len(backbone.channels) == len(backbone.out_indices))
            self.assertTrue(len(backbone.stage_names) == len(backbone.num_features))
            self.assertTrue(len(backbone.channels) <= len(backbone.num_features))
            self.assertTrue(len(backbone.out_feature_channels) == len(backbone.stage_names))

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            for hidden_state, n_channels in zip(outputs.hidden_states, backbone.channels):
                self.assertTrue(hidden_state.shape[:2], (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
8,644
44.026042
101
py
transformers
transformers-main/tests/test_tokenization_utils.py
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError

from transformers import (
    AlbertTokenizer,
    AutoTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    GPT2TokenizerFast,
    is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")


@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")


class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
11,793
40.97153
123
py
transformers
transformers-main/tests/test_feature_extraction_common.py
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
2,230
38.839286
94
py
transformers
transformers-main/tests/test_sequence_feature_extraction_common.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin): # to overwrite at feature extractactor specific tests feat_extract_tester = None feature_extraction_class = None @property def feat_extract_dict(self): return self.feat_extract_tester.prepare_feat_extract_dict() def test_feat_extract_common_properties(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(feat_extract, "feature_size")) self.assertTrue(hasattr(feat_extract, "sampling_rate")) self.assertTrue(hasattr(feat_extract, "padding_value")) def test_batch_feature(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name]))) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True) processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size) ) @require_torch def test_batch_feature_pt(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True) feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size) ) @require_tf def test_batch_feature_tf(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True) feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size) ) def _check_padding(self, numpify=False): def _inputs_have_equal_length(input): 
length = len(input[0]) for input_slice in input[1:]: if len(input_slice) != length: return False return True def _inputs_are_equal(input_1, input_2): if len(input_1) != len(input_2): return False for input_slice_1, input_slice_2 in zip(input_1, input_2): if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3): return False return True feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) pad_diff = self.feat_extract_tester.seq_length_diff pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff pad_min_length = self.feat_extract_tester.min_seq_length batch_size = self.feat_extract_tester.batch_size feature_size = self.feat_extract_tester.feature_size # test padding for List[int] + numpy input_1 = feat_extract.pad(processed_features, padding=False) input_1 = input_1[input_name] input_2 = feat_extract.pad(processed_features, padding="longest") input_2 = input_2[input_name] input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1])) input_3 = input_3[input_name] input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np") input_4 = input_4[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(ValueError): feat_extract.pad(processed_features, padding="max_length")[input_name] input_5 = feat_extract.pad( processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np" ) input_5 = input_5[input_name] self.assertFalse(_inputs_have_equal_length(input_1)) self.assertTrue(_inputs_have_equal_length(input_2)) self.assertTrue(_inputs_have_equal_length(input_3)) self.assertTrue(_inputs_are_equal(input_2, input_3)) self.assertTrue(len(input_1[0]) == pad_min_length) self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff) self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0]))) self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length)) if feature_size > 1: self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size) # test padding for `pad_to_multiple_of` for List[int] + numpy input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10) input_6 = input_6[input_name] input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10) input_7 = input_7[input_name] input_8 = feat_extract.pad( processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length ) input_8 = input_8[input_name] input_9 = feat_extract.pad( processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length, return_tensors="np", ) input_9 = input_9[input_name] self.assertTrue(all(len(x) % 10 == 0 for x in input_6)) self.assertTrue(_inputs_are_equal(input_6, input_7)) expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8)) self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length)) if feature_size > 1: self.assertTrue(input_9.shape[2] == feature_size) # Check padding value is correct padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length 
- pad_min_length)) < 1e-3 ) self.assertTrue( abs( np.asarray(input_2[1])[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1e-3 ) self.assertTrue( abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3 ) self.assertTrue( abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length)) < 1e-3 ) def _check_truncation(self, numpify=False): def _inputs_have_equal_length(input): length = len(input[0]) for input_slice in input[1:]: if len(input_slice) != length: return False return True def _inputs_are_equal(input_1, input_2): if len(input_1) != len(input_2): return False for input_slice_1, input_slice_2 in zip(input_1, input_2): if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3): return False return True feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) # truncate to smallest input_1 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True ) input_1 = input_1[input_name] input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0])) input_2 = input_2[input_name] self.assertTrue(_inputs_have_equal_length(input_1)) self.assertFalse(_inputs_have_equal_length(input_2)) # truncate to smallest with np input_3 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np", truncation=True, ) input_3 = input_3[input_name] input_4 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np" ) input_4 = input_4[input_name] self.assertTrue(_inputs_have_equal_length(input_3)) self.assertTrue(input_3.shape[1] == len(speech_inputs[0])) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(input_4)) # truncate to middle input_5 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True, return_tensors="np", ) input_5 = input_5[input_name] input_6 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True ) input_6 = input_6[input_name] input_7 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np" ) input_7 = input_7[input_name] self.assertTrue(input_5.shape[1] == len(speech_inputs[1])) self.assertTrue(_inputs_have_equal_length(input_5)) self.assertTrue(_inputs_have_equal_length(input_6)) self.assertTrue(_inputs_are_equal(input_5, input_6)) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(input_7)) self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1])) # padding has to be max_length when setting `truncation=True` with self.assertRaises(ValueError): feat_extract.pad(processed_features, truncation=True)[input_name] # padding has to be max_length when setting 
`truncation=True` with self.assertRaises(ValueError): feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(ValueError): feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(ValueError): feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy pad_to_multiple_of = 12 input_8 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, truncation=True, ) input_8 = input_8[input_name] input_9 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, ) input_9 = input_9[input_name] # retrieve expected_length as multiple of pad_to_multiple_of expected_length = len(speech_inputs[0]) if expected_length % pad_to_multiple_of != 0: expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_8[0]) == expected_length) self.assertTrue(_inputs_have_equal_length(input_8)) self.assertFalse(_inputs_have_equal_length(input_9)) def test_padding_from_list(self): self._check_padding(numpify=False) def test_padding_from_array(self): self._check_padding(numpify=True) def test_truncation_from_list(self): self._check_truncation(numpify=False) def test_truncation_from_array(self): self._check_truncation(numpify=True) @require_torch def test_padding_accepts_tensors_pt(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name] input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name] self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2) @require_tf def test_padding_accepts_tensors_tf(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name] input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name] self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2) def test_attention_mask(self): feat_dict = self.feat_extract_dict feat_dict["return_attention_mask"] = True feat_extract = self.feature_extraction_class(**feat_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() input_lenghts = [len(x) for x in speech_inputs] input_name = feat_extract.model_input_names[0] processed = BatchFeature({input_name: speech_inputs}) processed = feat_extract.pad(processed, padding="longest", return_tensors="np") self.assertIn("attention_mask", processed) self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2])) 
self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lenghts) def test_attention_mask_with_truncation(self): feat_dict = self.feat_extract_dict feat_dict["return_attention_mask"] = True feat_extract = self.feature_extraction_class(**feat_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() input_lenghts = [len(x) for x in speech_inputs] input_name = feat_extract.model_input_names[0] processed = BatchFeature({input_name: speech_inputs}) max_length = min(input_lenghts) processed_pad = feat_extract.pad( processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np" ) self.assertIn("attention_mask", processed_pad) self.assertListEqual( list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs] )
18,041
41.451765
119
py
transformers
transformers-main/tests/test_image_processing_common.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available


if is_torch_available():
    import numpy as np
    import torch

if is_vision_available():
    from PIL import Image


def prepare_image_inputs(image_processor_tester, equal_resolution=False, numpify=False, torchify=False):
    """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
    or a list of PyTorch tensors if one specifies torchify=True.

    One can specify whether the images are of the same resolution or not.
    """

    assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

    image_inputs = []
    for i in range(image_processor_tester.batch_size):
        if equal_resolution:
            width = height = image_processor_tester.max_resolution
        else:
            # To avoid getting image width/height 0
            min_resolution = image_processor_tester.min_resolution
            if getattr(image_processor_tester, "size_divisor", None):
                # If `size_divisor` is defined, the image needs to have width/size >= `size_divisor`
                min_resolution = max(image_processor_tester.size_divisor, min_resolution)
            width, height = np.random.choice(np.arange(min_resolution, image_processor_tester.max_resolution), 2)
        image_inputs.append(
            np.random.randint(255, size=(image_processor_tester.num_channels, width, height), dtype=np.uint8)
        )

    if not numpify and not torchify:
        # PIL expects the channel dimension as last dimension
        image_inputs = [Image.fromarray(np.moveaxis(image, 0, -1)) for image in image_inputs]

    if torchify:
        image_inputs = [torch.from_numpy(image) for image in image_inputs]

    return image_inputs


def prepare_video(image_processor_tester, width=10, height=10, numpify=False, torchify=False):
    """This function prepares a video as a list of PIL images/NumPy arrays/PyTorch tensors."""

    video = []
    for i in range(image_processor_tester.num_frames):
        video.append(np.random.randint(255, size=(image_processor_tester.num_channels, width, height), dtype=np.uint8))

    if not numpify and not torchify:
        # PIL expects the channel dimension as last dimension
        video = [Image.fromarray(np.moveaxis(frame, 0, -1)) for frame in video]

    if torchify:
        video = [torch.from_numpy(frame) for frame in video]

    return video


def prepare_video_inputs(image_processor_tester, equal_resolution=False, numpify=False, torchify=False):
    """This function prepares a batch of videos: a list of list of PIL images, or a list of list of numpy arrays
    if one specifies numpify=True, or a list of list of PyTorch tensors if one specifies torchify=True.

    One can specify whether the videos are of the same resolution or not.
    """

    assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

    video_inputs = []
    for i in range(image_processor_tester.batch_size):
        if equal_resolution:
            width = height = image_processor_tester.max_resolution
        else:
            width, height = np.random.choice(
                np.arange(image_processor_tester.min_resolution, image_processor_tester.max_resolution), 2
            )
        video = prepare_video(
            image_processor_tester=image_processor_tester,
            width=width,
            height=height,
            numpify=numpify,
            torchify=torchify,
        )
        video_inputs.append(video)

    return video_inputs


class ImageProcessingSavingTestMixin:
    test_cast_dtype = None

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path)

        self.assertEqual(image_processor_second.to_dict(), image_processor_first.to_dict())

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = image_processor_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname)

        self.assertEqual(image_processor_second.to_dict(), image_processor_first.to_dict())

    def test_init_without_params(self):
        image_processor = self.image_processing_class()
        self.assertIsNotNone(image_processor)

    @require_torch
    @require_vision
    def test_cast_dtype_device(self):
        if self.test_cast_dtype is not None:
            # Initialize image_processor
            image_processor = self.image_processing_class(**self.image_processor_dict)

            # create random PyTorch tensors
            image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)

            encoding = image_processor(image_inputs, return_tensors="pt")
            # for layoutLM compatiblity
            self.assertEqual(encoding.pixel_values.device, torch.device("cpu"))
            self.assertEqual(encoding.pixel_values.dtype, torch.float32)

            encoding = image_processor(image_inputs, return_tensors="pt").to(torch.float16)
            self.assertEqual(encoding.pixel_values.device, torch.device("cpu"))
            self.assertEqual(encoding.pixel_values.dtype, torch.float16)

            encoding = image_processor(image_inputs, return_tensors="pt").to("cpu", torch.bfloat16)
            self.assertEqual(encoding.pixel_values.device, torch.device("cpu"))
            self.assertEqual(encoding.pixel_values.dtype, torch.bfloat16)

            with self.assertRaises(TypeError):
                _ = image_processor(image_inputs, return_tensors="pt").to(torch.bfloat16, "cpu")

            # Try with text + image feature
            encoding = image_processor(image_inputs, return_tensors="pt")
            encoding.update({"input_ids": torch.LongTensor([[1, 2, 3], [4, 5, 6]])})
            encoding = encoding.to(torch.float16)

            self.assertEqual(encoding.pixel_values.device, torch.device("cpu"))
            self.assertEqual(encoding.pixel_values.dtype, torch.float16)
            self.assertEqual(encoding.input_ids.dtype, torch.long)
7,751
42.307263
119
py
transformers
transformers-main/tests/test_modeling_common.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import copy import gc import inspect import os import os.path import pickle import random import re import tempfile import warnings from collections import defaultdict from typing import Dict, List, Tuple import numpy as np from pytest import mark import transformers from transformers import ( AutoModel, AutoModelForSequenceClassification, PretrainedConfig, is_torch_available, logging, ) from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from transformers.testing_utils import ( CaptureLogger, is_pt_flax_cross_test, is_pt_tf_cross_test, require_accelerate, require_safetensors, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import ( CONFIG_NAME, GENERATION_CONFIG_NAME, WEIGHTS_NAME, is_accelerate_available, is_flax_available, is_tf_available, is_torch_fx_available, ) from transformers.utils.generic import ModelOutput if is_accelerate_available(): from accelerate.utils import compute_module_sizes if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, AdaptiveEmbedding from transformers.pytorch_utils import id_tensor_storage if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_fx_available(): from transformers.utils.fx import symbolic_trace def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init def _mock_init_weights(self, module): for name, param in module.named_parameters(recurse=False): # Use the first letter of the name to get a value and go from a <> -13 to z <> 12 value = ord(name[0].lower()) - 110 
param.data.fill_(value) def _mock_all_init_weights(self): # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) import transformers.modeling_utils if transformers.modeling_utils._init_weights: for module in self.modules(): module._is_hf_initialized = False # Initialize weights self.apply(self._initialize_weights) # Tie weights should be skipped when not initializing all weights # since from_pretrained(...) calls tie weights anyways self.tie_weights() @require_torch class ModelTesterMixin: model_tester = None all_model_classes = () all_generative_model_classes = () fx_compatible = False test_torchscript = True test_pruning = True test_resize_embeddings = True test_resize_position_embeddings = False test_head_masking = True test_mismatched_shapes = True test_missing_keys = True test_model_parallel = False is_encoder_decoder = False has_attentions = True model_split_percents = [0.5, 0.7, 0.9] def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if isinstance(v, torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } elif model_class.__name__ in get_values(MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES): inputs_dict.pop("attention_mask") if return_labels: if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) elif model_class.__name__ in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), ]: inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES), *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) elif model_class.__name__ in get_values(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES): num_patches = self.model_tester.image_size // self.model_tester.patch_size inputs_dict["bool_masked_pos"] = torch.zeros( (self.model_tester.batch_size, num_patches**2), dtype=torch.long, device=torch_device ) elif model_class.__name__ in get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, height, width], device=torch_device ).long() return inputs_dict def test_save_load(self): config, inputs_dict 
= self.model_tester.prepare_config_and_inputs_for_common() def check_save_load(out1, out2): # make sure we don't have nans out_2 = out2.cpu().numpy() out_2[np.isnan(out_2)] = 0 out_1 = out1.cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) with torch.no_grad(): second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_save_load(tensor1, tensor2) else: check_save_load(first, second) def test_from_pretrained_no_checkpoint(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) state_dict = model.state_dict() new_model = model_class.from_pretrained( pretrained_model_name_or_path=None, config=config, state_dict=state_dict ) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_save_load_keys_to_ignore_on_save(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) _keys_to_ignore_on_save = getattr(model, "_keys_to_ignore_on_save", None) if _keys_to_ignore_on_save is None: continue # check the keys are in the original state_dict for k in _keys_to_ignore_on_save: self.assertIn(k, model.state_dict().keys(), "\n".join(model.state_dict().keys())) # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) output_model_file = os.path.join(tmpdirname, WEIGHTS_NAME) state_dict_saved = torch.load(output_model_file) for k in _keys_to_ignore_on_save: self.assertNotIn(k, state_dict_saved.keys(), "\n".join(state_dict_saved.keys())) # Test we can load the state dict in the model, necessary for the checkpointing API in Trainer. 
load_result = model.load_state_dict(state_dict_saved, strict=False) self.assertTrue( len(load_result.missing_keys) == 0 or set(load_result.missing_keys) == set(model._keys_to_ignore_on_save) ) self.assertTrue(len(load_result.unexpected_keys) == 0) def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue config.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) def test_gradient_checkpointing_enable_disable(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue # at init model should have gradient checkpointing disabled model = model_class(config) self.assertFalse(model.is_gradient_checkpointing) # check enable works model.gradient_checkpointing_enable() self.assertTrue(model.is_gradient_checkpointing) # check disable works model.gradient_checkpointing_disable() self.assertFalse(model.is_gradient_checkpointing) def test_save_load_fast_init_from_base(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: return base_class = MODEL_MAPPING[config.__class__] if isinstance(base_class, tuple): base_class = base_class[0] for model_class in self.all_model_classes: if model_class == base_class: continue # make a copy of model class to not break future tests # from https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class class CopyClass(model_class): pass model_class_copy = CopyClass # make sure that all keys are expected for test model_class_copy._keys_to_ignore_on_load_missing = [] # make init deterministic, but make sure that # non-initialized weights throw errors nevertheless model_class_copy._init_weights = _mock_init_weights model_class_copy.init_weights = _mock_all_init_weights model = base_class(config) state_dict = model.state_dict() # this will often delete a single weight of a multi-weight module # to test an edge case random_key_to_del = random.choice(list(state_dict.keys())) del state_dict[random_key_to_del] # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) model_fast_init = model_class_copy.from_pretrained(tmpdirname) model_slow_init = model_class_copy.from_pretrained(tmpdirname, _fast_init=False) # Before we test anything for key in model_fast_init.state_dict().keys(): if isinstance(model_slow_init.state_dict()[key], torch.BoolTensor): max_diff = (model_slow_init.state_dict()[key] ^ model_fast_init.state_dict()[key]).sum().item() else: max_diff = (model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_fast_init_to_base(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: return base_class = MODEL_MAPPING[config.__class__] if isinstance(base_class, tuple): base_class = base_class[0] for model_class in self.all_model_classes: if model_class == base_class: continue # make a copy of model class to not break future tests # from 
https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class class CopyClass(base_class): pass base_class_copy = CopyClass # make sure that all keys are expected for test base_class_copy._keys_to_ignore_on_load_missing = [] # make init deterministic, but make sure that # non-initialized weights throw errors nevertheless base_class_copy._init_weights = _mock_init_weights base_class_copy.init_weights = _mock_all_init_weights model = model_class(config) state_dict = model.state_dict() # this will often delete a single weight of a multi-weight module # to test an edge case random_key_to_del = random.choice(list(state_dict.keys())) del state_dict[random_key_to_del] # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.config.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) model_fast_init = base_class_copy.from_pretrained(tmpdirname) model_slow_init = base_class_copy.from_pretrained(tmpdirname, _fast_init=False) for key in model_fast_init.state_dict().keys(): if isinstance(model_slow_init.state_dict()[key], torch.BoolTensor): max_diff = torch.max( model_slow_init.state_dict()[key] ^ model_fast_init.state_dict()[key] ).item() else: max_diff = torch.max( torch.abs(model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key]) ).item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_determinism(first, second): out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_determinism(tensor1, tensor2) else: check_determinism(first, second) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids"] self.assertListEqual(arg_names[:1], 
expected_arg_names) def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class.__name__ in [ *get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), ]: continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True if ( model_class.__name__ in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)] or not model_class.supports_gradient_checkpointing ): continue model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class.__name__ in [ 
*get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), ]: correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) @slow def test_torchscript_simple(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torchscript(config, inputs_dict) @slow def test_torchscript_output_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_attentions = True self._create_and_check_torchscript(config, inputs_dict) @slow def test_torchscript_output_hidden_state(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True self._create_and_check_torchscript(config, inputs_dict) # This is copied from `torch/testing/_internal/jit_utils.py::clear_class_registry` def clear_torch_jit_class_registry(self): torch._C._jit_clear_class_registry() torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() # torch 1.8 has no `_clear_class_state` in `torch.jit._state` if hasattr(torch.jit._state, "_clear_class_state"): torch.jit._state._clear_class_state() def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: if 
model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward main_input = inputs[main_input_name] attention_mask = inputs["attention_mask"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] model(main_input, attention_mask, decoder_input_ids, decoder_attention_mask) traced_model = torch.jit.trace( model, (main_input, attention_mask, decoder_input_ids, decoder_attention_mask) ) elif "bbox" in inputs and "image" in inputs: # LayoutLMv2 requires additional inputs input_ids = inputs["input_ids"] bbox = inputs["bbox"] image = inputs["image"].tensor model(input_ids, bbox, image) traced_model = torch.jit.trace( model, (input_ids, bbox, image), check_trace=False ) # when traced model is checked, an error is produced due to name mangling else: main_input = inputs[main_input_name] model(main_input) traced_model = torch.jit.trace(model, main_input) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() def test_torch_fx(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torch_fx_tracing(config, inputs_dict) def test_torch_fx_output_loss(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torch_fx_tracing(config, inputs_dict, output_loss=True) def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not is_torch_fx_available() or not self.fx_compatible: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Test that the model can be serialized and restored properly with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = 
loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. # (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() def test_headmasking(self): if not self.test_head_masking: return global_rng.seed(42) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() global_rng.seed() inputs_dict["output_attentions"] = True config.output_hidden_states = True configs_no_init = _config_zero_init(config) # To be sure we have no Nan for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() # Prepare head_mask # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior) head_mask = torch.ones( self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads, device=torch_device, ) head_mask[0, 0] = 0 head_mask[-1, :-1] = 0 head_mask.requires_grad_(requires_grad=True) inputs = self._prepare_for_class(inputs_dict, model_class).copy() inputs["head_mask"] = head_mask if model.config.is_encoder_decoder: signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] if "decoder_head_mask" in arg_names: # necessary diferentiation because of T5 model inputs["decoder_head_mask"] = head_mask if "cross_attn_head_mask" in arg_names: inputs["cross_attn_head_mask"] = head_mask outputs = model(**inputs, return_dict=True) # Test that we can get a gradient back for importance score computation output = sum(t.sum() for t in outputs[0]) output = output.sum() output.backward() multihead_outputs = head_mask.grad self.assertIsNotNone(multihead_outputs) self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers) def check_attentions_validity(attentions): # Remove Nan for t in attentions: self.assertLess( torch.sum(torch.isnan(t)), t.numel() / 4 ) # Check we don't have more than 25% nans (arbitrary) attentions = [ t.masked_fill(torch.isnan(t), 0.0) for t in attentions ] # remove them (the test is less complete) self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0) self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0) if len(attentions) > 2: # encoder-decoder models have only 2 layers in each module self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0) self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0) self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0) if model.config.is_encoder_decoder: check_attentions_validity(outputs.encoder_attentions) check_attentions_validity(outputs.decoder_attentions) check_attentions_validity(outputs.cross_attentions) else: check_attentions_validity(outputs.attentions) def test_head_pruning(self): if not self.test_pruning: return for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config=config) model.to(torch_device) model.eval() heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } 
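# --- Illustrative sketch, not part of the original test suite. ---
# A minimal, self-contained example of the head-pruning behaviour this test
# asserts; the choice of `BertConfig`/`BertModel` and the tiny sizes are
# assumptions made purely for illustration.
def _example_head_pruning_sketch():
    import torch
    from transformers import BertConfig, BertModel

    config = BertConfig(
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
    )
    model = BertModel(config).eval()
    # Keep only head 0 in the first layer and drop head 0 in the last layer,
    # mirroring the `heads_to_prune` mapping built just above.
    model.prune_heads({0: [1, 2, 3], -1: [0]})
    input_ids = torch.randint(0, config.vocab_size, (1, 7))
    with torch.no_grad():
        attentions = model(input_ids, output_attentions=True).attentions
    assert attentions[0].shape[1] == 1  # first layer: a single head remains
    assert attentions[-1].shape[1] == 3  # last layer: one head was pruned
# Back to the test itself: prune the model under test with the mapping built above.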
model.prune_heads(heads_to_prune) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_save_load_from_pretrained(self): if not self.test_pruning: return for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config=config) model.to(torch_device) model.eval() heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } model.prune_heads(heads_to_prune) with tempfile.TemporaryDirectory() as temp_dir_name: model.save_pretrained(temp_dir_name) model = model_class.from_pretrained(temp_dir_name) model.to(torch_device) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_save_load_from_config_init(self): if not self.test_pruning: return for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } config.pruned_heads = heads_to_prune model = model_class(config=config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_integration(self): if not self.test_pruning: return for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False heads_to_prune = {0: [0], 1: [1, 2]} config.pruned_heads = heads_to_prune model = model_class(config=config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads) with tempfile.TemporaryDirectory() as temp_dir_name: model.save_pretrained(temp_dir_name) model = model_class.from_pretrained(temp_dir_name) model.to(torch_device) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 
self.model_tester.num_attention_heads - 1) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads) heads_to_prune = {0: [0], 2: [1, 2]} model.prune_heads(heads_to_prune) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads - 2) self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads) self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2], 2: [1, 2]}) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] if config.is_encoder_decoder: # Seq2Seq models encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() decoder_hidden_states = outputs.decoder_hidden_states[0] decoder_hidden_states.retain_grad() if self.has_attentions: encoder_attentions = outputs.encoder_attentions[0] encoder_attentions.retain_grad() decoder_attentions = 
outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(decoder_hidden_states.grad) if self.has_attentions: self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) else: # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) def test_feed_forward_chunking(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: torch.manual_seed(0) config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model.eval() hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0] torch.manual_seed(0) config.chunk_size_feed_forward = 1 model = model_class(config) model.to(torch_device) model.eval() hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0] self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3)) def test_resize_position_vector_embeddings(self): if not self.test_resize_position_embeddings: return ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() max_position_embeddings = config.max_position_embeddings # Retrieve the embeddings and clone theme if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() encoder_cloned_embeddings = encoder_model_embed.weight.clone() decoder_cloned_embeddings = decoder_model_embed.weight.clone() else: model_embed = model.get_position_embeddings() cloned_embeddings = model_embed.weight.clone() # Check that resizing the position embeddings with a larger max_position_embeddings increases # the model's postion embeddings size model.resize_position_embeddings(max_position_embeddings + 10) self.assertEqual(model.config.max_position_embeddings, max_position_embeddings + 10) # Check that it actually resizes the embeddings matrix if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] + 10) self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] + 10) else: model_embed = model.get_position_embeddings() self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the position embeddings with a smaller max_position_embeddings decreases # the model's max_position_embeddings model.resize_position_embeddings(max_position_embeddings - 5) self.assertEqual(model.config.max_position_embeddings, max_position_embeddings - 5) # Check that it actually resizes the embeddings 
matrix if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] - 5) self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] - 5) else: model_embed = model.get_position_embeddings() self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 5) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True if model.config.is_encoder_decoder: for p1, p2 in zip(encoder_cloned_embeddings, encoder_model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False for p1, p2 in zip(decoder_cloned_embeddings, decoder_model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False else: for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) # make sure that decoder_input_ids are resized as well if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
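# (the resized matrix is shorter than the clone, and `zip` stops at the shorter
# tensor, so only the surviving rows are compared element-wise below)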
models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Embedding, AdaptiveEmbedding)) model.set_input_embeddings(nn.Embedding(10, 10)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_correct_missing_keys(self): if not self.test_missing_keys: return config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) base_model_prefix = model.base_model_prefix if hasattr(model, base_model_prefix): extra_params = {k: v for k, v in model.named_parameters() if not k.startswith(base_model_prefix)} 
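# Head-specific parameters collected above (the corresponding buffers are merged
# in just below); once the keys listed in `_keys_to_ignore_on_load_missing` are
# dropped, anything left over must be reported in `loading_info["missing_keys"]`
# when only the base model is saved and then reloaded into the head model class.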
extra_params.update({k: v for k, v in model.named_buffers() if not k.startswith(base_model_prefix)}) # Some models define this as None if model._keys_to_ignore_on_load_missing: for key in model._keys_to_ignore_on_load_missing: extra_params.pop(key, None) if not extra_params: # In that case, we *are* on a head model, but every # single key is not actual parameters and this is # tested in `test_tied_model_weights_key_ignore` test. continue with tempfile.TemporaryDirectory() as temp_dir_name: model.base_model.save_pretrained(temp_dir_name) model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True) self.assertGreater(len(loading_info["missing_keys"]), 0, model.__class__.__name__) def test_tie_model_weights(self): if not self.test_torchscript: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_same_values(layer_1, layer_2): equal = True for p1, p2 in zip(layer_1.weight, layer_2.weight): if p1.data.ne(p2.data).sum() > 0: equal = False return equal for model_class in self.all_model_classes: config.torchscript = True model_not_tied = model_class(config) if model_not_tied.get_output_embeddings() is None: continue config_tied = copy.deepcopy(config) config_tied.torchscript = False model_tied = model_class(config_tied) params_tied = list(model_tied.parameters()) # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(check_same_values(embeddings, decoding)) # # Check that after modification, they remain the same. # embeddings.weight.data.div_(2) # # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(embeddings.weight.shape, decoding.weight.shape) # self.assertTrue(check_same_values(embeddings, decoding)) # # Check that after modification, they remain the same. # decoding.weight.data.div_(4) # # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(embeddings.weight.shape, decoding.weight.shape) # self.assertTrue(check_same_values(embeddings, decoding)) # Check that after resize they remain tied. 
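# (tied input and output embeddings share a single tensor, so resizing the
# vocabulary must leave the number of distinct parameter tensors unchanged)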
model_tied.resize_token_embeddings(config.vocab_size + 10) params_tied_2 = list(model_tied.parameters()) self.assertEqual(len(params_tied_2), len(params_tied)) # decoding.weight.data.mul_(20) # # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape) # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head)) @require_safetensors def test_can_use_safetensors(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model_tied = model_class(config) with tempfile.TemporaryDirectory() as d: try: model_tied.save_pretrained(d, safe_serialization=True) except Exception as e: raise Exception(f"Class {model_class.__name__} cannot be saved using safetensors: {e}") model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) # Checking the state dicts are correct reloaded_state = model_reloaded.state_dict() for k, v in model_tied.state_dict().items(): self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") torch.testing.assert_close( v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" ) # Checking there was no complain of missing weights self.assertEqual(infos["missing_keys"], []) # Checking the tensor sharing are correct ptrs = defaultdict(list) for k, v in model_tied.state_dict().items(): ptrs[v.data_ptr()].append(k) shared_ptrs = {k: v for k, v in ptrs.items() if len(v) > 1} for _, shared_names in shared_ptrs.items(): reloaded_ptrs = {reloaded_state[k].data_ptr() for k in shared_names} self.assertEqual( len(reloaded_ptrs), 1, f"The shared pointers are incorrect, found different pointers for keys {shared_names}", ) def test_load_save_without_tied_weights(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.tie_word_embeddings = False for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as d: model.save_pretrained(d) model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) # Checking the state dicts are correct reloaded_state = model_reloaded.state_dict() for k, v in model.state_dict().items(): self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") torch.testing.assert_close( v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" ) # Checking there was no complain of missing weights self.assertEqual(infos["missing_keys"], []) def test_tied_weights_keys(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.tie_word_embeddings = True for model_class in self.all_model_classes: model_tied = model_class(config) ptrs = collections.defaultdict(list) for name, tensor in model_tied.state_dict().items(): ptrs[id_tensor_storage(tensor)].append(name) # These are all the pointers of shared tensors. 
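# --- Illustrative sketch, not part of the original test suite. ---
# A hedged, self-contained example of the storage-sharing check performed here,
# using a toy module whose two weights are explicitly tied; the module and its
# attribute names are invented purely for illustration.
def _example_shared_storage_sketch():
    import collections
    from torch import nn
    from transformers.pytorch_utils import id_tensor_storage

    class TiedToy(nn.Module):
        def __init__(self):
            super().__init__()
            self.encoder = nn.Linear(4, 4, bias=False)
            self.decoder = nn.Linear(4, 4, bias=False)
            self.decoder.weight = self.encoder.weight  # tie the two weights

    ptrs = collections.defaultdict(list)
    for name, tensor in TiedToy().state_dict().items():
        ptrs[id_tensor_storage(tensor)].append(name)
    shared = [names for names in ptrs.values() if len(names) > 1]
    assert shared == [["encoder.weight", "decoder.weight"]]
# The same grouping, applied to the real model's state dict, yields `tied_params`: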
tied_params = [names for _, names in ptrs.items() if len(names) > 1] tied_weight_keys = model_tied._tied_weights_keys if model_tied._tied_weights_keys is not None else [] # Detect we get a hit for each key for key in tied_weight_keys: if not any(re.search(key, p) for group in tied_params for p in group): raise ValueError(f"{key} is not a tied weight key for {model_class}.") # Removed tied weights found from tied params -> there should only be one left after for key in tied_weight_keys: for i in range(len(tied_params)): tied_params[i] = [p for p in tied_params[i] if re.search(key, p) is None] tied_params = [group for group in tied_params if len(group) > 1] self.assertListEqual( tied_params, [], f"Missing `_tied_weights_keys` for {model_class}: add all of {tied_params} except one.", ) def test_model_weights_reload_no_missing_tied_weights(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # We are nuking ALL weights on file, so every parameter should # yell on load. We're going to detect if we yell too much, or too little. with open(os.path.join(tmp_dir, "pytorch_model.bin"), "wb") as f: torch.save({}, f) model_reloaded, infos = model_class.from_pretrained(tmp_dir, output_loading_info=True) prefix = f"{model_reloaded.base_model_prefix}." params = dict(model_reloaded.named_parameters()) params.update(dict(model_reloaded.named_buffers())) param_names = {k[len(prefix) :] if k.startswith(prefix) else k for k in params.keys()} missing_keys = set(infos["missing_keys"]) extra_missing = missing_keys - param_names # Remove tied weights from extra missing: they are normally not warned as missing if their tied # counterpart is present but here there are no weights at all so we do get the warning. ptrs = collections.defaultdict(list) for name, tensor in model_reloaded.state_dict().items(): ptrs[id_tensor_storage(tensor)].append(name) tied_params = [names for _, names in ptrs.items() if len(names) > 1] for group in tied_params: group = {k[len(prefix) :] if k.startswith(prefix) else k for k in group} # We remove the group from extra_missing if not all weights from group are in it if len(group - extra_missing) > 0: extra_missing = extra_missing - set(group) self.assertEqual( extra_missing, set(), f"This model {model_class.__name__} might be missing some `keys_to_ignore`: {extra_missing}. " f"For debugging, tied parameters are {tied_params}", ) missed_missing = param_names - missing_keys # Remove nonpersistent buffers from missed_missing buffers = [n for n, _ in model_reloaded.named_buffers()] nonpersistent_buffers = {n for n in buffers if n not in model_reloaded.state_dict()} nonpersistent_buffers = { k[len(prefix) :] if k.startswith(prefix) else k for k in nonpersistent_buffers } missed_missing = missed_missing - nonpersistent_buffers if model_reloaded._keys_to_ignore_on_load_missing is None: expected_missing = set() else: expected_missing = set(model_reloaded._keys_to_ignore_on_load_missing) self.assertEqual( missed_missing, expected_missing, f"This model {model_class.__name__} ignores keys {missed_missing} but they look like real" " parameters. 
If they are non persistent buffers make sure to instantiate them with" " `persistent=False`", ) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! 
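# (the helper below forces a 1 at the first position of every attention mask so
# that no sequence is fully masked; fully masked sequences currently give
# framework-dependent results because of the different large negative fill
# values used across models)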
def _make_attention_mask_non_null(self, inputs_dict): """Make sure no sequence has all zeros as attention mask""" for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]: if k in inputs_dict: attention_mask = inputs_dict[k] # Make sure no all 0s attention masks - to avoid failure at this moment. # Put `1` at the beginning of sequences to make it still work when combining causal attention masks. # TODO: remove this line once a fix regarding large negative values for attention mask is done. attention_mask = torch.cat( [torch.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], dim=-1 ) # Here we make the first sequence with all 0s as attention mask. # Currently, this will fail for `TFWav2Vec2Model`. This is caused by the different large negative # values, like `1e-4`, `1e-9`, `1e-30` and `-inf` for attention mask across models/frameworks. # TODO: enable this block once the large negative values thing is cleaned up. # (see https://github.com/huggingface/transformers/issues/14859) # attention_mask = torch.cat( # [torch.zeros_like(attention_mask[:1], dtype=attention_mask.dtype), attention_mask[1:]], # dim=0 # ) inputs_dict[k] = attention_mask # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class): """For temporarily ignoring some failed test cases (issues to be fixed)""" tf_keys = {k for k, v in tf_outputs.items() if v is not None} pt_keys = {k for k, v in pt_outputs.items() if v is not None} key_differences = tf_keys.symmetric_difference(pt_keys) if model_class.__name__ in [ "FlaubertWithLMHeadModel", "FunnelForPreTraining", "ElectraForPreTraining", "XLMWithLMHeadModel", "TransfoXLLMHeadModel", ]: for k in key_differences: if k in ["loss", "losses"]: tf_keys.discard(k) pt_keys.discard(k) elif model_class.__name__.startswith("GPT2"): # `TFGPT2` has `past_key_values` as a tensor while `GPT2` has it as a tuple. tf_keys.discard("past_key_values") pt_keys.discard("past_key_values") # create new outputs from the remaining fields new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys}) new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys}) return new_tf_outputs, new_pt_outputs # Copied from tests.test_modeling_tf_common.TFModelTesterMixin.check_pt_tf_outputs def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way. Args: model_class: The class of the model that is currently testing. For example, `TFBertModel`, TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative error messages. name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc. attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element being a named field in the output. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). 
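# (for `ModelOutput` instances the check first aligns the non-None keys on both
# sides, then recurses over the tuple form; plain tuples/lists and bare tensors
# are handled by the branches further down, with NaNs zeroed out before taking
# the maximum absolute difference)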
if isinstance(tf_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is", ) # Don't copy this block to model specific test file! # TODO: remove this method and this line after issues are fixed tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class) tf_keys = [k for k, v in tf_outputs.items() if v is not None] pt_keys = [k for k, v in pt_outputs.items() if v is not None] self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `name` attributes = tuple([f"{name}.{k}" for k in tf_keys]) self.check_pt_tf_outputs( tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(tf_outputs) in [tuple, list]: self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch") self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch") if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(tf_outputs), f"{name}: The tuple `attributes` should have the same length as `tf_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name` attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))]) for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes): self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(tf_outputs, tf.Tensor): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `tf_outputs` is" ) tf_outputs = tf_outputs.numpy() pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(tf_outputs): tf_outputs = np.array([tf_outputs]) pt_outputs = np.array([pt_outputs]) tf_nans = np.isnan(tf_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[tf_nans] = 0 tf_outputs[tf_nans] = 0 pt_outputs[pt_nans] = 0 tf_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(tf_outputs - pt_outputs)) self.assertLessEqual(max_diff, tol, f"{name}: Difference between PyTorch and TF is {max_diff} (>= {tol}).") else: raise ValueError( "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `tf.Tensor`. Got" f" {type(tf_outputs)} instead." 
) def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict): tf_inputs_dict = {} for key, tensor in pt_inputs_dict.items(): # skip key that does not exist in tf if type(tensor) == bool: tf_inputs_dict[key] = tensor elif key == "input_values": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) elif key == "pixel_values": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) elif key == "input_features": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) # other general float inputs elif tensor.is_floating_point(): tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) else: tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.int32) return tf_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict): tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict) # send pytorch inputs to the correct device pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } # send pytorch model to the correct device pt_model.to(torch_device) # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) # tf models returned loss is usually a tensor rather than a scalar. # (see `hf_compute_loss`: it uses `tf.keras.losses.Reduction.NONE`) # Change it here to a scalar to match PyTorch models' loss tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(pt_model)) @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() tf_model_class_name = "TF" + model_class.__name__ # Add the "TF" at the beginning if not hasattr(transformers, tf_model_class_name): # transformers does not have this model in TF version yet return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`. # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it. 
self._make_attention_mask_non_null(inputs_dict) tf_model_class = getattr(transformers, tf_model_class_name) pt_model = model_class(config) tf_model = tf_model_class(config) pt_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs_dict_with_labels = self._prepare_for_class( inputs_dict, model_class, # Not all models accept "labels" in the forward pass (yet :) ) return_labels=True if "labels" in inspect.signature(model_class.forward).parameters.keys() else False, ) # make sure only tf inputs are forward that actually exist in function args tf_input_keys = set(inspect.signature(tf_model.call).parameters.keys()) # remove all head masks tf_input_keys.discard("head_mask") tf_input_keys.discard("cross_attn_head_mask") tf_input_keys.discard("decoder_head_mask") pt_inputs_dict = {k: v for k, v in pt_inputs_dict.items() if k in tf_input_keys} pt_inputs_dict_with_labels = {k: v for k, v in pt_inputs_dict_with_labels.items() if k in tf_input_keys} # For some models (e.g. base models), there is no label returned. # Set the input dict to `None` to avoid check outputs twice for the same input dicts. if not set(pt_inputs_dict_with_labels.keys()).symmetric_difference(pt_inputs_dict.keys()): pt_inputs_dict_with_labels = None # Check we can load pt model in tf and vice-versa with model => model functions # Here requires `tf_inputs_dict` to build `tf_model` tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict) tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) # check with `labels` if pt_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels) # Check we can load pt model in tf and vice-versa with checkpoint => model functions with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) # check with `labels` if pt_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """ Args: model_class: The class of the model that is currently testing. For example, ..., etc. Currently unused, but it could make debugging easier and faster. names: A string, or a list of strings. These specify what fx_outputs/pt_outputs represent in the model outputs. Currently unused, but in the future, we could use this information to make the error message clearer by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax. 
""" self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). if isinstance(fx_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is", ) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `name` attributes = tuple([f"{name}.{k}" for k in fx_keys]) self.check_pt_flax_outputs( fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(fx_outputs) in [tuple, list]: self.assertEqual( type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch" ) self.assertEqual( len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch" ) if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(fx_outputs), f"{name}: The tuple `attributes` should have the same length as `fx_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name` attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))]) for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes): self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(fx_outputs, jnp.ndarray): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `fx_outputs` is" ) # Using `np.asarray` gives `ValueError: assignment destination is read-only` at the line `fx_outputs[fx_nans] = 0`. fx_outputs = np.array(fx_outputs) pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(fx_outputs): fx_outputs = np.array([fx_outputs]) pt_outputs = np.array([pt_outputs]) fx_nans = np.isnan(fx_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[fx_nans] = 0 fx_outputs[fx_nans] = 0 pt_outputs[pt_nans] = 0 fx_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(fx_outputs - pt_outputs)) self.assertLessEqual( max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})." ) else: raise ValueError( "`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got" f" {type(fx_outputs)} instead." 
) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # some params shouldn't be scattered by nn.DataParallel # so just remove them if they are present. 
blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) # move input tensors to cuda:O for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: model = model_class(config=config) model.to(0) model.eval() # Wrap model in nn.DataParallel model = nn.DataParallel(model) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch_multi_gpu def test_model_parallelization(self): if not self.test_model_parallel: return # a candidate for testing_utils def get_current_gpu_memory_use(): """returns a list of cuda memory allocations per GPU in MBs""" per_device_memory = [] for id in range(torch.cuda.device_count()): with torch.cuda.device(id): per_device_memory.append(torch.cuda.memory_allocated() >> 20) return per_device_memory # Needs a large model to see the difference. config = self.model_tester.get_large_model_config() for model_class in self.all_parallelizable_model_classes: torch.cuda.empty_cache() # 1. single gpu memory load + unload + memory measurements # Retrieve initial memory usage (can easily be ~0.6-1.5GB if cuda-kernels have been preloaded by previous tests) memory_at_start = get_current_gpu_memory_use() # Put model on device 0 and take a memory snapshot model = model_class(config) model.to("cuda:0") memory_after_model_load = get_current_gpu_memory_use() # The memory use on device 0 should be higher than it was initially. self.assertGreater(memory_after_model_load[0], memory_at_start[0]) del model gc.collect() torch.cuda.empty_cache() # 2. MP test # it's essential to re-calibrate the usage before the next stage memory_at_start = get_current_gpu_memory_use() # Spread model layers over multiple devices model = model_class(config) model.parallelize() memory_after_parallelization = get_current_gpu_memory_use() # Assert that the memory use on all devices is higher than it was when loaded only on CPU for n in range(len(model.device_map.keys())): self.assertGreater(memory_after_parallelization[n], memory_at_start[n]) # Assert that the memory use of device 0 is lower than it was when the entire model was loaded on it self.assertLess(memory_after_parallelization[0], memory_after_model_load[0]) # Assert that the memory use of device 1 is higher than it was when the entire model was loaded # on device 0 and device 1 wasn't used at all self.assertGreater(memory_after_parallelization[1], memory_after_model_load[1]) del model gc.collect() torch.cuda.empty_cache() @require_torch_multi_gpu def test_model_parallel_equal_results(self): if not self.test_model_parallel: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_parallelizable_model_classes: inputs_dict = self._prepare_for_class(inputs_dict, model_class) def cast_to_device(dictionary, device): output = {} for k, v in dictionary.items(): if isinstance(v, torch.Tensor): output[k] = v.to(device) else: output[k] = v return output model = model_class(config) output = model(**cast_to_device(inputs_dict, "cpu")) model.parallelize() parallel_output = model(**cast_to_device(inputs_dict, "cuda:0")) for value, parallel_value in zip(output, parallel_output): if isinstance(value, torch.Tensor): self.assertTrue(torch.allclose(value, parallel_value.to("cpu"), atol=1e-7)) elif isinstance(value, (Tuple, List)): for value_, parallel_value_ in zip(value, parallel_value): 
self.assertTrue(torch.allclose(value_, parallel_value_.to("cpu"), atol=1e-7)) @require_torch_multi_gpu def test_model_parallel_beam_search(self): if not self.test_model_parallel: return all_generative_and_parallelizable_model_classes = tuple( set(self.all_generative_model_classes).intersection(self.all_parallelizable_model_classes) ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in all_generative_and_parallelizable_model_classes: inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) def cast_to_device(dictionary, device): output = {} for k, v in dictionary.items(): if isinstance(v, torch.Tensor): output[k] = v.to(device) else: output[k] = v return output model.parallelize() model.generate(**cast_to_device(inputs_dict, "cuda:0"), num_beams=2) def check_device_map_is_respected(self, model, device_map): for param_name, param in model.named_parameters(): # Find device in device_map while len(param_name) > 0 and param_name not in device_map: param_name = ".".join(param_name.split(".")[:-1]) if param_name not in device_map: raise ValueError("device map is incomplete, it does not contain any device for `param_name`.") param_device = device_map[param_name] if param_device in ["cpu", "disk"]: self.assertEqual(param.device, torch.device("meta")) else: self.assertEqual(param.device, torch.device(param_device)) @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_disk_offload(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] max_size = int(self.model_split_percents[0] * model_size) with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) max_memory = {0: max_size, "cpu": max_size} with self.assertRaises(ValueError): # This errors out cause it's missing an offload folder new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) new_model = model_class.from_pretrained( tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir ) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_cpu_offload(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] # We test several splits of sizes to make sure it works. 
max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) for max_size in max_gpu_sizes: max_memory = {0: max_size, "cpu": model_size * 2} new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) # Making sure part of the model will actually end up offloaded self.assertSetEqual(set(new_model.hf_device_map.values()), {0, "cpu"}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_accelerate @mark.accelerate_tests @require_torch_multi_gpu def test_model_parallelism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] # We test several splits of sizes to make sure it works. max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) for max_size in max_gpu_sizes: max_memory = {0: max_size, 1: model_size * 2, "cpu": model_size * 2} new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) # Making sure part of the model will actually end up offloaded self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) def test_problem_types(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() problem_types = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if model_class.__name__ not in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), ]: continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"): config.problem_type = problem_type["title"] config.num_labels = problem_type["num_labels"] model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if problem_type["num_labels"] > 1: inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    def test_load_with_mismatched_shapes(self):
        if not self.test_mismatched_shapes:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            if model_class.__name__ not in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES):
                continue

            with self.subTest(msg=f"Testing {model_class}"):
                with tempfile.TemporaryDirectory() as tmp_dir:
                    model = model_class(config)
                    model.save_pretrained(tmp_dir)

                    # Fails when we don't set ignore_mismatched_sizes=True
                    with self.assertRaises(RuntimeError):
                        new_model = AutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42)
                    with self.assertRaises(RuntimeError):
                        new_model_without_prefix = AutoModel.from_pretrained(tmp_dir, vocab_size=10)

                    logger = logging.get_logger("transformers.modeling_utils")

                    with CaptureLogger(logger) as cl:
                        new_model = AutoModelForSequenceClassification.from_pretrained(
                            tmp_dir, num_labels=42, ignore_mismatched_sizes=True
                        )
                    self.assertIn("the shapes did not match", cl.out)
                    new_model.to(torch_device)
                    inputs = self._prepare_for_class(inputs_dict, model_class)
                    logits = new_model(**inputs).logits
                    self.assertEqual(logits.shape[1], 42)

                    with CaptureLogger(logger) as cl:
                        new_model_without_prefix = AutoModel.from_pretrained(
                            tmp_dir, vocab_size=10, ignore_mismatched_sizes=True
                        )
                    self.assertIn("the shapes did not match", cl.out)
                    input_ids = ids_tensor((2, 8), 10)
                    new_model_without_prefix.to(torch_device)
                    if self.is_encoder_decoder:
                        new_model_without_prefix(input_ids, decoder_input_ids=input_ids)
                    else:
                        new_model_without_prefix(input_ids)

    def test_model_is_small(self):
        # Just a consistency check to make sure we are not running tests on 80M parameter models.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        # print(config)

        for model_class in self.all_model_classes:
            model = model_class(config)
            num_params = model.num_parameters()
            assert (
                num_params < 1000000
            ), f"{model_class} is too big for the common tests ({num_params})! It should have 1M max."


global_rng = random.Random()


def ids_tensor(shape, vocab_size, rng=None, name=None):
    # Creates a random int32 tensor of the shape within the vocab size
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()


def random_attention_mask(shape, rng=None, name=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


def floats_tensor(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.random() * scale)

    return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()
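# --- Editor's illustrative sketch; not part of the upstream test suite. ---
# A minimal example of how the random-tensor helpers above are typically combined to
# build the inputs a model tester feeds to `prepare_config_and_inputs_for_common`.
# The function names and the default batch_size/seq_length/vocab_size/image_size values
# below are assumptions chosen only for illustration.
def _example_text_inputs(batch_size=2, seq_length=7, vocab_size=99):
    """Hypothetical helper: random text-model inputs placed on `torch_device`."""
    input_ids = ids_tensor([batch_size, seq_length], vocab_size)  # ids drawn uniformly from [0, vocab_size)
    attention_mask = random_attention_mask([batch_size, seq_length])  # 0/1 mask, last column forced to 1
    return {"input_ids": input_ids, "attention_mask": attention_mask}


def _example_vision_inputs(batch_size=2, num_channels=3, image_size=30):
    """Hypothetical helper: random vision-model inputs placed on `torch_device`."""
    return {"pixel_values": floats_tensor([batch_size, num_channels, image_size, image_size])}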
126716
44.895328
147
py
transformers
transformers-main/tests/__init__.py
0
0
0
py
transformers
transformers-main/tests/test_modeling_tf_common.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import inspect import json import os import random import tempfile import unittest from importlib import import_module from math import isnan from typing import List, Tuple from datasets import Dataset from transformers import is_tf_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import ( # noqa: F401 CaptureLogger, _tf_gpu_memory_limit, is_pt_tf_cross_test, require_tf, require_tf2onnx, slow, torch_device, ) from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging from transformers.utils.generic import ModelOutput logger = logging.get_logger(__name__) if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFAutoModel, TFAutoModelForSequenceClassification, TFSharedEmbeddings, ) from transformers.generation import ( TFBeamSampleDecoderOnlyOutput, TFBeamSampleEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput, TFBeamSearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput, TFGreedySearchEncoderDecoderOutput, TFSampleDecoderOnlyOutput, TFSampleEncoderDecoderOutput, ) tf.config.experimental.enable_tensor_float_32_execution(False) if _tf_gpu_memory_limit is not None: gpus = tf.config.list_physical_devices("GPU") for gpu in gpus: # Restrict TensorFlow to only allocate x GB of memory on the GPUs try: tf.config.set_logical_device_configuration( gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)] ) logical_gpus = tf.config.list_logical_devices("GPU") print("Logical GPUs", logical_gpus) except RuntimeError as e: # Virtual devices must be set before GPUs have been initialized print(e) if is_torch_available(): import torch def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key: setattr(configs_no_init, key, 0.0) return configs_no_init @require_tf class TFModelTesterMixin: model_tester = None all_model_classes = () all_generative_model_classes = () test_mismatched_shapes = True test_resize_embeddings = True test_head_masking = True is_encoder_decoder = False has_attentions = True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): 
inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING), *get_values(TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING), ]: inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING), ] and "labels" in dict(inspect.signature(model_class.call).parameters): inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) elif model_class in get_values(TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING): num_patches = self.model_tester.image_size // self.model_tester.patch_size inputs_dict["bool_masked_pos"] = tf.zeros( (self.model_tester.batch_size, num_patches**2), dtype=tf.int32 ) elif model_class in get_values(TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = tf.zeros((self.model_tester.batch_size, height, width), dtype=tf.int32) elif model_class.__name__.endswith("ForCTC"): # When we have enough CTC models for an AutoClass, we should use their mapping instead of name checks inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) return inputs_dict def test_initialization(self): pass def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model = model_class.from_pretrained(tmpdirname) after_outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assert_outputs_same(after_outputs, outputs) def test_save_load_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) model_config = model.get_config() # make sure that returned config is jsonifiable, which is required by keras 
json.dumps(model_config) new_model = model_class.from_config(model.get_config()) # make sure it also accepts a normal config _ = model_class.from_config(model.config) _ = new_model(self._prepare_for_class(inputs_dict, model_class)) # Build model new_model.set_weights(model.get_weights()) after_outputs = new_model(self._prepare_for_class(inputs_dict, model_class)) self.assert_outputs_same(after_outputs, outputs) @slow def test_saved_model_creation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = False config.output_attentions = False if hasattr(config, "use_cache"): config.use_cache = False model_class = self.all_model_classes[0] class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) model(class_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") self.assertTrue(os.path.exists(saved_model_dir)) def test_prepare_serving_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(inputs) serving_outputs = model.serving_output(outputs) for k, v in serving_outputs.items(): # Check that we have one of three possible outputs: None, tuple of tensors or a tensor if isinstance(v, tuple): self.assertTrue(all(isinstance(elem, tf.Tensor) for elem in v)) elif v is not None: self.assertIsInstance(v, tf.Tensor) else: self.assertIsNone(v) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend(["decoder_position_ids"] if "decoder_position_ids" in arg_names else []) expected_arg_names.extend( ["head_mask", "decoder_head_mask"] if "head_mask" and "decoder_head_mask" in arg_names else [] ) expected_arg_names.extend( ["cross_attn_head_mask", "encoder_outputs"] if "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_onnx_compliancy(self): if not self.test_onnx: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() INTERNAL_OPS = [ "Assert", "AssignVariableOp", "EmptyTensorList", "ReadVariableOp", "ResourceGather", "TruncatedNormal", "VarHandleOp", "VarIsInitializedOp", ] onnx_ops = [] with open(os.path.join(".", "utils", "tf_ops", "onnx.json")) as f: onnx_opsets = json.load(f)["opsets"] for i in range(1, self.onnx_min_opset + 1): onnx_ops.extend(onnx_opsets[str(i)]) for model_class in self.all_model_classes: model_op_names = set() with tf.Graph().as_default() as g: model = model_class(config) model.build() for op in g.get_operations(): model_op_names.add(op.node_def.op) model_op_names = sorted(model_op_names) incompatible_ops = [] for op in model_op_names: if op 
not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(op) self.assertEqual(len(incompatible_ops), 0, incompatible_ops) @require_tf2onnx @slow def test_onnx_runtime_optimize(self): if not self.test_onnx: return import onnxruntime import tf2onnx config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: model = model_class(config) model.build() onnx_model_proto, _ = tf2onnx.convert.from_keras(model, opset=self.onnx_min_opset) onnxruntime.InferenceSession(onnx_model_proto.SerializeToString()) def test_keras_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() tf_main_layer_classes = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(module) if module_member_name.endswith("MainLayer") # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")] for module_member in (getattr(module, module_member_name),) if isinstance(module_member, type) and tf.keras.layers.Layer in module_member.__bases__ and getattr(module_member, "_keras_serializable", False) } for main_layer_class in tf_main_layer_classes: # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter if "T5" in main_layer_class.__name__: # Take the same values than in TFT5ModelTester for this shared layer shared = TFSharedEmbeddings(99, 32, name="shared") config.use_cache = inputs_dict.pop("use_cache", None) main_layer = main_layer_class(config, embed_tokens=shared) else: main_layer = main_layer_class(config) symbolic_inputs = { name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() } model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs)) outputs = model(inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: filepath = os.path.join(tmpdirname, "keras_model.h5") model.save(filepath) if "T5" in main_layer_class.__name__: model = tf.keras.models.load_model( filepath, custom_objects={ main_layer_class.__name__: main_layer_class, "TFSharedEmbeddings": TFSharedEmbeddings, }, ) else: model = tf.keras.models.load_model( filepath, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(model, tf.keras.Model) after_outputs = model(inputs_dict) self.assert_outputs_same(after_outputs, outputs) def assert_outputs_same(self, after_outputs, outputs): # Make sure we don't have nans if isinstance(after_outputs, tf.Tensor): out_1 = after_outputs.numpy() elif isinstance(after_outputs, dict): out_1 = after_outputs[list(after_outputs.keys())[0]].numpy() else: out_1 = after_outputs[0].numpy() out_2 = outputs[0].numpy() self.assertEqual(out_1.shape, out_2.shape) out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! def _make_attention_mask_non_null(self, inputs_dict): """Make sure no sequence has all zeros as attention mask""" for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]: if k in inputs_dict: attention_mask = inputs_dict[k] # Make sure no all 0s attention masks - to avoid failure at this moment. 
# Put `1` at the beginning of sequences to make it still work when combining causal attention masks. # TODO: remove this line once a fix regarding large negative values for attention mask is done. attention_mask = tf.concat( [tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1 ) # Here we make the first sequence with all 0s as attention mask. # Currently, this will fail for `TFWav2Vec2Model`. This is caused by the different large negative # values, like `1e-4`, `1e-9`, `1e-30` and `-inf` for attention mask across models/frameworks. # TODO: enable this block once the large negative values thing is cleaned up. # (see https://github.com/huggingface/transformers/issues/14859) # attention_mask = tf.concat( # [ # tf.zeros_like(attention_mask[:1], dtype=tf.int32), # tf.cast(attention_mask[1:], dtype=tf.int32) # ], # axis=0 # ) inputs_dict[k] = attention_mask # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class): """For temporarily ignoring some failed test cases (issues to be fixed)""" tf_keys = {k for k, v in tf_outputs.items() if v is not None} pt_keys = {k for k, v in pt_outputs.items() if v is not None} key_differences = tf_keys.symmetric_difference(pt_keys) if model_class.__name__ in [ "TFFlaubertWithLMHeadModel", "TFFunnelForPreTraining", "TFElectraForPreTraining", "TFXLMWithLMHeadModel", "TFTransfoXLLMHeadModel", ]: for k in key_differences: if k in ["loss", "losses"]: tf_keys.discard(k) pt_keys.discard(k) elif model_class.__name__.startswith("TFGPT2"): # `TFGPT2` has `past_key_values` as a tensor while `GPT2` has it as a tuple. tf_keys.discard("past_key_values") pt_keys.discard("past_key_values") # create new outputs from the remaining fields new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys}) new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys}) return new_tf_outputs, new_pt_outputs def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way. Args: model_class: The class of the model that is currently testing. For example, `TFBertModel`, TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative error messages. name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc. attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element being a named field in the output. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). if isinstance(tf_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is", ) # Don't copy this block to model specific test file! 
            # TODO: remove this method and this line after issues are fixed
            tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class)

            tf_keys = [k for k, v in tf_outputs.items() if v is not None]
            pt_keys = [k for k, v in pt_outputs.items() if v is not None]

            self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch")

            # convert to the case of `tuple`
            # appending each key to the current (string) `names`
            attributes = tuple([f"{name}.{k}" for k in tf_keys])
            self.check_pt_tf_outputs(
                tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
            )

        # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
        elif type(tf_outputs) in [tuple, list]:
            self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch")
            self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch")

            if attributes is not None:
                # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`)
                self.assertEqual(
                    len(attributes),
                    len(tf_outputs),
                    f"{name}: The tuple `names` should have the same length as `tf_outputs`",
                )
            else:
                # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `names`
                attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))])

            for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes):
                self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr)

        elif isinstance(tf_outputs, tf.Tensor):
            self.assertTrue(
                isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should be a tensor when `tf_outputs` is"
            )

            tf_outputs = tf_outputs.numpy()
            pt_outputs = pt_outputs.detach().to("cpu").numpy()

            self.assertEqual(
                tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch"
            )

            # deal with NumPy's scalars to make replacing nan values by 0 work.
            if np.isscalar(tf_outputs):
                tf_outputs = np.array([tf_outputs])
                pt_outputs = np.array([pt_outputs])

            tf_nans = np.isnan(tf_outputs)
            pt_nans = np.isnan(pt_outputs)

            pt_outputs[tf_nans] = 0
            tf_outputs[tf_nans] = 0
            pt_outputs[pt_nans] = 0
            tf_outputs[pt_nans] = 0

            max_diff = np.amax(np.abs(tf_outputs - pt_outputs))
            self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).")
        else:
            raise ValueError(
                "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `tf.Tensor`. Got"
                f" {type(tf_outputs)} instead."
) def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict = {} for name, key in tf_inputs_dict.items(): if type(key) == bool: pt_inputs_dict[name] = key elif name == "input_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "pixel_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "input_features": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) # other general float inputs elif tf_inputs_dict[name].dtype.is_floating: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) else: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long) return pt_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict): pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict) # send pytorch inputs to the correct device pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } # send pytorch model to the correct device pt_model.to(torch_device) # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) # tf models returned loss is usually a tensor rather than a scalar. # (see `hf_compute_loss`: it uses `tf.keras.losses.Reduction.NONE`) # Change it here to a scalar to match PyTorch models' loss tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model)) @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`. # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it. self._make_attention_mask_non_null(inputs_dict) pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) tf_model = model_class(config) pt_model = pt_model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class) tf_inputs_dict_with_labels = self._prepare_for_class( inputs_dict, model_class, # Not all models accept "labels" in the forward pass (yet :) ) return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False, ) # For some models (e.g. base models), there is no label returned. # Set the input dict to `None` to avoid check outputs twice for the same input dicts. 
if not set(tf_inputs_dict_with_labels.keys()).symmetric_difference(tf_inputs_dict.keys()): tf_inputs_dict_with_labels = None # Check we can load pt model in tf and vice-versa with model => model functions tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # check with `labels` if tf_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels) # Check we can load pt model in tf and vice-versa with checkpoint => model functions with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # check with `labels` if tf_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels) @slow def test_compile_tf_model(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: # Prepare our model model = model_class(config) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes functional_inputs = { key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key) for key, val in model.input_signature.items() if key in model.dummy_inputs } outputs_dict = model(functional_inputs) hidden_states = outputs_dict[0] # Compile extended model functional_model = tf.keras.Model(inputs=functional_inputs, outputs=hidden_states) model_out = functional_model.predict(model.dummy_inputs) # Check we can pass inputs with the Keras API self.assertTrue(model_out is not None) with tempfile.TemporaryDirectory() as tmpdirname: functional_model.save(tmpdirname) # Ensure we can save/export the whole functional model def test_keyword_and_dict_args(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs_dict = model(inputs) inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) outputs_keywords = model(**inputs_keywords) output_dict = outputs_dict[0].numpy() output_keywords = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length) decoder_key_length = getattr(self.model_tester, 
"key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) def check_decoder_attentions_output(outputs): out_len = len(outputs) self.assertEqual(min(out_len % 2, out_len % 5), 0) # differentiation due to newly added cross_attentions decoder_attentions = outputs.decoder_attentions self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) def check_encoder_attentions_output(outputs): attentions = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) out_len = len(outputs) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) if self.is_encoder_decoder: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_decoder_attentions_output(outputs) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True config.output_hidden_states = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs)) self.assertEqual(model.config.output_hidden_states, True) check_encoder_attentions_output(outputs) def test_headmasking(self): if not self.test_head_masking: return random.Random().seed(42) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() random.Random().seed() inputs_dict["output_attentions"] = True config.output_hidden_states = True configs_no_init = _config_zero_init(config) # To be sure we have no Nan for model_class in self.all_model_classes: model = model_class(config=configs_no_init) # Prepare head_mask def prepare_layer_head_mask(i, attention_heads, num_hidden_layers): if i == 0: return tf.concat( (tf.zeros(1, dtype=tf.float32), tf.ones(attention_heads - 1, dtype=tf.float32)), 0 ) elif i == num_hidden_layers - 1: return tf.concat( (tf.zeros(attention_heads - 1, dtype=tf.float32), tf.ones(1, dtype=tf.float32)), 0 ) else: return tf.ones(attention_heads, dtype=tf.float32) head_mask = tf.stack( [ prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers) for i in range(config.num_hidden_layers) ], 0, ) inputs = self._prepare_for_class(inputs_dict, model_class).copy() inputs["head_mask"] = head_mask if model.config.is_encoder_decoder: signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] if "decoder_head_mask" in arg_names: # necessary diferentiation because of T5 model inputs["decoder_head_mask"] = head_mask if "cross_attn_head_mask" in 
arg_names: inputs["cross_attn_head_mask"] = head_mask outputs = model(**inputs, return_dict=True) def check_attentions_validity(attentions): # Remove Nan for t in attentions: self.assertLess( (tf.math.reduce_sum(tf.cast(tf.math.is_nan(t), tf.float32))).numpy(), (tf.size(t) / 4).numpy() ) # Check we don't have more than 25% nans (arbitrary) attentions = [ tf.where(tf.math.is_nan(t), 0.0, t) for t in attentions ] # remove them (the test is less complete) self.assertAlmostEqual(tf.math.reduce_sum(attentions[0][..., 0, :, :]).numpy(), 0.0) self.assertNotEqual(tf.math.reduce_sum(attentions[0][..., -1, :, :]).numpy(), 0.0) if len(attentions) > 2: # encoder-decodere models have only 2 layers in each modules self.assertNotEqual(tf.math.reduce_sum(attentions[1][..., 0, :, :]).numpy(), 0.0) self.assertAlmostEqual(tf.math.reduce_sum(attentions[-1][..., -2, :, :]).numpy(), 0.0) self.assertNotEqual(tf.math.reduce_sum(attentions[-1][..., -1, :, :]).numpy(), 0.0) if model.config.is_encoder_decoder: check_attentions_validity(outputs.encoder_attentions) check_attentions_validity(outputs.decoder_attentions) if "cross_attn_head_mask" in arg_names: check_attentions_validity(outputs.cross_attentions) else: check_attentions_validity(outputs.attentions) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_hidden_states_output(config, inputs_dict, model_class): model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) if model.config.is_encoder_decoder: encoder_hidden_states = outputs.encoder_hidden_states decoder_hidden_states = outputs.decoder_hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(encoder_hidden_states), expected_num_layers) self.assertListEqual( list(encoder_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) self.assertEqual(len(decoder_hidden_states), expected_num_layers) self.assertListEqual( list(decoder_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) else: hidden_states = outputs.hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(config, inputs_dict, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(config, inputs_dict, model_class) def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() text_in_text_out_models = ( get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING) + get_values(TF_MODEL_FOR_MASKED_LM_MAPPING) + get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING) ) speech_in_text_out_models = get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING) for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), tf.keras.layers.Layer) legacy_text_in_text_out = model.get_lm_head() is not None if model_class in text_in_text_out_models or legacy_text_in_text_out: out_embeddings = model.get_output_embeddings() self.assertIsInstance(out_embeddings, tf.keras.layers.Layer) bias = 
model.get_bias() if bias is not None: self.assertIsInstance(bias, dict) for _, v in bias.items(): self.assertIsInstance(v, tf.Variable) elif model_class in speech_in_text_out_models: out_embeddings = model.get_output_embeddings() self.assertIsInstance(out_embeddings, tf.keras.layers.Layer) bias = model.get_bias() self.assertIsNone(bias) else: out_embeddings = model.get_output_embeddings() assert out_embeddings is None bias = model.get_bias() self.assertIsNone(bias) def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) first, second = ( model(self._prepare_for_class(inputs_dict, model_class), training=False)[0], model(self._prepare_for_class(inputs_dict, model_class), training=False)[0], ) out_1 = first.numpy() out_2 = second.numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(tuple_object, dict_object)), msg=( "Tuple and dict output are not equal. Difference:" f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}" ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) # Not all models accept "labels" in the forward pass (yet :) ) if "labels" in inspect.signature(model.call).parameters.keys(): tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = 
self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = copy.deepcopy(inputs_dict) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) if not self.is_encoder_decoder: inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids) else: inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids) inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids) inputs = self._prepare_for_class(inputs, model_class) model(inputs) def test_numpy_arrays_inputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def prepare_numpy_arrays(inputs_dict): inputs_np_dict = {} for k, v in inputs_dict.items(): if tf.is_tensor(v): inputs_np_dict[k] = v.numpy() else: inputs_np_dict[k] = np.array(k) return inputs_np_dict for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) inputs_np = prepare_numpy_arrays(inputs) output_for_dict_input = model(inputs_np) output_for_kw_input = model(**inputs_np) self.assert_outputs_same(output_for_dict_input, output_for_kw_input) def test_valid_input_signature_and_dummies(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) call_args = inspect.signature(model.call).parameters for key in model.input_signature: self.assertIn(key, call_args) for key in model.dummy_inputs: self.assertIn(key, call_args) def test_resize_token_embeddings(self): # TODO (joao): after the embeddings refactor is complete, rework this test so as to rely exclusively on # tf.keras.layers.Embedding if not self.test_resize_embeddings: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(model, embedding_layer): if isinstance(embedding_layer, tf.keras.layers.Embedding): # builds the embeddings layer model.build() return embedding_layer.embeddings else: return model._get_word_embedding_weight(embedding_layer) for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10, None]: # build the embeddings model = model_class(config=copy.deepcopy(config)) # `resize_token_embeddings` mutates `config` old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) old_bias = model.get_bias() old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) # reshape the embeddings model.resize_token_embeddings(size) new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) new_bias = model.get_bias() new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) # check that the resized embeddings size matches the desired size. 
assert_size = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0], assert_size) # check that weights remain the same after resizing models_equal = True for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_bias is not None and new_bias is not None: for old_weight, new_weight in zip(old_bias.values(), new_bias.values()): self.assertEqual(new_weight.shape[-1], assert_size) models_equal = True for p1, p2 in zip(tf.squeeze(old_weight), tf.squeeze(new_weight)): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0], assert_size) self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1]) models_equal = True for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) # TODO (Joao): this test is not slow, but it's tagged as such to keep track of failures on the scheduled CI runs, # while passing push CI. Fix the underlying issues and remove the tag. @slow def test_save_load_after_resize_token_embeddings(self): if not self.test_resize_embeddings: return config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # create a model with resized (expended) embeddings new_tokens_size = 10 old_total_size = config.vocab_size new_total_size = old_total_size + new_tokens_size model = model_class(config=copy.deepcopy(config)) # `resize_token_embeddings` mutates `config` model.build() model.resize_token_embeddings(new_total_size) # fetch the output for an input exclusively made of new members of the vocabulary inputs_dict = copy.deepcopy(original_inputs_dict) ids_feat_name = None if "input_ids" in inputs_dict: ids_feat_name = "input_ids" elif "decoder_input_ids" in inputs_dict: ids_feat_name = "decoder_input_ids" else: assert False, "No input ids feature found in the inputs dict" new_vocab_input_ids = ids_tensor(inputs_dict[ids_feat_name].shape, new_tokens_size) new_vocab_input_ids += old_total_size inputs_dict[ids_feat_name] = new_vocab_input_ids if "input_ids" in inputs_dict: inputs_dict["input_ids"] = new_vocab_input_ids if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"] = new_vocab_input_ids prepared_inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs) # save and load the model with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) model = model_class.from_pretrained(tmpdirname) restored_model_outputs = model(**prepared_inputs) # check that the output for the restored model is the same self.assert_outputs_same(restored_model_outputs, outputs) @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="This test always passes on CPU.", ) def test_embeddings_out_of_bounds_raise_exception(self): # TF embeddings layers don't raise an exception when an index is out of bounds on GPU, so we manually raise it. # This test should only fail on GPU for models where we haven't added the safety check. 
if not self.test_resize_embeddings: return config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) inputs_dict = copy.deepcopy(original_inputs_dict) if "input_ids" in inputs_dict: inputs_dict["input_ids"] = inputs_dict["input_ids"] * int(1e9) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"] = inputs_dict["decoder_input_ids"] * int(1e9) prepared_inputs = self._prepare_for_class(inputs_dict, model_class) with self.assertRaises(tf.errors.InvalidArgumentError): model(**prepared_inputs) def test_lm_head_model_random_no_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_ids with self.assertRaises(ValueError): model.generate(do_sample=True, max_length=5) # num_return_sequences = 1 self._check_generated_ids(model.generate(input_ids, do_sample=True)) elif model_class.__name__ not in ["TFSpeech2TextForConditionalGeneration"]: # Models with non-text inputs won't work here; num_return_sequences = 1 self._check_generated_ids(model.generate(do_sample=True, max_length=5)) with self.assertRaises(ValueError): # generating multiple sequences when no beam search generation # is not allowed as it would always generate the same sequences model.generate(input_ids, do_sample=False, num_return_sequences=2) # num_return_sequences > 1, sample self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2)) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_ids.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_lm_head_model_no_beam_search_generate_dict_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) if input_ids is None: input_ids = inputs_dict.get("input_features", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) output_greedy = model.generate( input_ids, do_sample=False, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) output_sample = model.generate( input_ids, do_sample=True, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_greedy, TFGreedySearchEncoderDecoderOutput) self.assertIsInstance(output_sample, TFSampleEncoderDecoderOutput) else: self.assertIsInstance(output_greedy, TFGreedySearchDecoderOnlyOutput) self.assertIsInstance(output_sample, TFSampleDecoderOnlyOutput) def test_lm_head_model_random_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) for model_class in self.all_generative_model_classes: model = 
model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_ids, num_return_sequences = 1 self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2)) else: # num_return_sequences = 1 self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2)) with self.assertRaises(ValueError): # generating more sequences than having beams leads is not possible model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2) # num_return_sequences > 1, sample self._check_generated_ids( model.generate( input_ids, do_sample=True, num_beams=2, num_return_sequences=2, ) ) # num_return_sequences > 1, greedy self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2)) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_ids.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_lm_head_model_beam_search_generate_dict_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) if input_ids is None: input_ids = inputs_dict.get("input_features", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) output_beam_search = model.generate( input_ids, num_beams=2, do_sample=False, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) output_beam_sample = model.generate( input_ids, num_beams=2, do_sample=True, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, TFBeamSearchEncoderDecoderOutput) self.assertIsInstance(output_beam_sample, TFBeamSampleEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, TFBeamSearchDecoderOnlyOutput) self.assertIsInstance(output_beam_sample, TFBeamSampleDecoderOnlyOutput) def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # The number of elements in the loss should be the same as the number of elements in the label prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) added_label_names = sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True) if not added_label_names: continue # This test is only for models with easily-separable labels added_label = prepared_for_class[added_label_names[0]] expected_loss_size = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) outputs = model(model_input, **prepared_for_class) if not isinstance(outputs, ModelOutput) or not 
hasattr(outputs, "loss"): continue loss = outputs.loss self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss when we mask some positions prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) if "labels" in prepared_for_class: labels = prepared_for_class["labels"].numpy() if len(labels.shape) > 1 and labels.shape[1] != 1: labels[0] = -100 prepared_for_class["labels"] = tf.convert_to_tensor(labels) loss = model(model_input, **prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) self.assertTrue(not np.any(np.isnan(loss.numpy()))) # Test that model correctly compute the loss with a dict prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) loss = model(prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss with a tuple prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) # Get keys that were added with the _prepare_for_class function label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) # Send to model loss = model(tuple_input[:-1])[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) def check_keras_fit_results(self, val_loss1, val_loss2, atol=1e-2, rtol=1e-3): self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol)) @slow def test_keras_fit(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # Test that model correctly compute the loss with kwargs prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) # We also remove "return_loss" as this is covered by the train_step when using fit() prepared_for_class = { key: val for key, val in prepared_for_class.items() if key not in ("head_mask", "decoder_head_mask", "cross_attn_head_mask", "return_loss") } if "labels" in prepared_for_class and "decoder_input_ids" in prepared_for_class: del prepared_for_class["decoder_input_ids"] accuracy_classes = [ "ForPreTraining", "ForCausalLM", "ForMaskedLM", "ForQuestionAnswering", "ForMultipleChoice", "ForSequenceClassification", "ForTokenClassification", "ForNextSentencePrediction", "LMHeadModel", ] for accuracy_class in accuracy_classes: if model.__class__.__name__.endswith(accuracy_class): metrics = 
[tf.keras.metrics.SparseCategoricalAccuracy()] break else: metrics = [] if hasattr(self.model_tester, "batch_size"): sample_weight = tf.convert_to_tensor([0.5] * self.model_tester.batch_size, dtype=tf.float32) else: sample_weight = None # Build the model so we can get some constant weights and check outputs outputs = model(prepared_for_class) if getattr(outputs, "loss", None) is None: continue model_weights = model.get_weights() # Run eagerly to save some expensive compilation times model.compile(optimizer=tf.keras.optimizers.SGD(0.0), run_eagerly=True, metrics=metrics) # Make sure the model fits without crashing regardless of where we pass the labels history1 = model.fit( prepared_for_class, validation_data=prepared_for_class, sample_weight=sample_weight, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss1 = history1.history["val_loss"][0] self.assertTrue(not isnan(val_loss1)) accuracy1 = {key: val[0] for key, val in history1.history.items() if key.endswith("accuracy")} possible_label_cols = { "labels", "label", "label_ids", "start_positions", "start_position", "end_positions", "end_position", "next_sentence_label", } label_names = possible_label_cols.intersection(set(prepared_for_class)) if len(label_names) == 0: # The next tests only make sense for models with separate inputs and labels, and do not make # sense for models that don't clearly distinguish between the two (e.g. CLIP) return labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names} self.assertGreater(len(inputs_minus_labels), 0) # We reinitialize the model here even though our learning rate was zero # because BatchNorm updates weights by means other than gradient descent. 
model.set_weights(model_weights) history2 = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), sample_weight=sample_weight, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss2 = history2.history["val_loss"][0] self.assertTrue(not isnan(val_loss2)) accuracy2 = {key: val[0] for key, val in history2.history.items() if key.endswith("accuracy")} self.check_keras_fit_results(val_loss1, val_loss2) self.assertEqual(history1.history.keys(), history2.history.keys()) for key in history1.history.keys(): if not key.startswith("val_"): self.assertTrue("val_" + key in history1.history.keys(), "Outputs differ in train/test step!") if metrics: self.assertTrue(len(accuracy1) == len(accuracy2) > 0, "Missing metrics!") def test_int_support(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: prepared_for_class = self._prepare_for_class( inputs_dict.copy(), model_class, return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False, ) if not any( tensor.dtype.is_integer for tensor in prepared_for_class.values() if isinstance(tensor, tf.Tensor) ): return # No integer inputs means no need for this test prepared_for_class = { key: tf.cast(tensor, tf.int64) if isinstance(tensor, tf.Tensor) and tensor.dtype.is_integer else tensor for key, tensor in prepared_for_class.items() } model = model_class(config) model(**prepared_for_class) # No assertion, we're just checking this doesn't throw an error int32_prepared_for_class = { key: tf.cast(tensor, tf.int32) if isinstance(tensor, tf.Tensor) and tensor.dtype.is_integer else tensor for key, tensor in prepared_for_class.items() } model(**int32_prepared_for_class) # No assertion, we're just checking this doesn't throw an error # After testing that the model accepts all int inputs, confirm that its dummies are int32 for key, tensor in model.dummy_inputs.items(): self.assertTrue( isinstance(tensor, tf.Tensor) or tf.keras.backend.is_keras_tensor(tensor), "Dummy inputs should be tf.Tensor!", ) if tensor.dtype.is_integer: self.assertTrue(tensor.dtype == tf.int32, "Integer dummy inputs should be tf.int32!") # Also confirm that the input_signature uses int32 for key, tensor_spec in model.input_signature.items(): if tensor_spec.dtype.is_integer: self.assertTrue(tensor_spec.dtype == tf.int32, "Input signatures should use tf.int32 for ints!") def test_generate_with_headmasking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_generative_model_classes: model = model_class(config) # We want to test only encoder-decoder models if not config.is_encoder_decoder: continue head_masking = { "head_mask": tf.zeros((config.encoder_layers, config.encoder_attention_heads)), "decoder_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)), "cross_attn_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)), } signature = inspect.signature(model.call) if set(head_masking.keys()) < {*signature.parameters.keys()}: continue for attn_name, (name, mask) in zip(attention_names, head_masking.items()): out = model.generate( inputs_dict["input_ids"], num_beams=1, max_length=inputs_dict["input_ids"] + 5, output_attentions=True, return_dict_in_generate=True, **{name: mask}, ) # We check the state of decoder_attentions and cross_attentions just from the last step 
attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([tf.reduce_sum(w).numpy() for w in attn_weights]), 0.0) def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class not in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) _ = model(**inputs) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(ValueError): new_model = TFAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(ValueError): new_model_without_prefix = TFAutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_tf_utils") with CaptureLogger(logger) as cl: new_model = TFAutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) logits = new_model(**inputs).logits self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = TFAutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) # Although Tf models always have a prefix pointing to `MainLayer`, # we still add this "without prefix" test to keep a consistency between tf and pt tests. input_ids = ids_tensor((2, 8), 10) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "call")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_dataset_conversion(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class, return_labels=False) if "labels" in tf_inputs_dict: return # This is some kinda funky decoder model that needs labels in its forward pass tf_inputs_dict = { key: val for key, val in tf_inputs_dict.items() if "head_mask" not in key and isinstance(val, tf.Tensor) } tf_inputs_dict["extra_unwanted_column"] = list(tf_inputs_dict.values())[0] # Use a random other tensor input_dataset = Dataset.from_dict(tf_inputs_dict) tf_dataset = model.prepare_tf_dataset( input_dataset, batch_size=len(input_dataset), drop_remainder=False, shuffle=False ) test_batch = next(iter(tf_dataset)) if isinstance(test_batch, tf.Tensor): self.assertEqual(len(test_batch), len(input_dataset)) # Assert we didn't lose any data elif isinstance(test_batch, dict): # Assert we discarded the unwanted extra column but kept everything else self.assertEqual(len(test_batch), len(input_dataset.features) - 1) self.assertNotIn("extra_unwanted_column", test_batch) for tensor in test_batch.values(): self.assertTrue(isinstance(tensor, tf.Tensor)) self.assertEqual(len(tensor), len(input_dataset)) # Assert we didn't lose any data 
model(test_batch, training=False) if "labels" in inspect.signature(model_class.call).parameters.keys(): tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if "labels" not in tf_inputs_dict: return # This model isn't giving us labels after all, don't try training with it tf_inputs_dict = {key: val for key, val in tf_inputs_dict.items() if "head_mask" not in key} tf_inputs_dict["extra_unwanted_column"] = list(tf_inputs_dict.values())[0] # Use a random other tensor input_dataset = Dataset.from_dict(tf_inputs_dict) tf_dataset = model.prepare_tf_dataset( input_dataset, batch_size=len(input_dataset), drop_remainder=False, shuffle=False ) test_batch, test_batch_labels = next(iter(tf_dataset)) self.assertGreater(len(test_batch_labels), 0) # Assert the labels are present feature_columns = 1 if isinstance(test_batch, tf.Tensor) else len(test_batch) label_columns = 1 if isinstance(test_batch_labels, tf.Tensor) else len(test_batch_labels) # Assert we discarded the unwanted extra column but kept everything else self.assertEqual(feature_columns + label_columns, len(input_dataset.features) - 1) if isinstance(test_batch, dict): self.assertNotIn("extra_unwanted_column", test_batch) if isinstance(test_batch_labels, dict): self.assertNotIn("extra_unwanted_column", test_batch_labels) model.compile(optimizer="sgd", run_eagerly=True) model.train_on_batch(test_batch, test_batch_labels) def _test_xla_generate(self, **generate_kwargs): def _generate_and_check_results(model, inputs_dict): if "input_ids" in inputs_dict: inputs = inputs_dict["input_ids"] # make sure there are no pad tokens in prompt, which may trigger unwanted behavior if model.generation_config.pad_token_id is not None: if config.pad_token_id == 0: new_pad_token = model.generation_config.pad_token_id + 1 else: new_pad_token = model.generation_config.pad_token_id - 1 else: new_pad_token = None inputs = tf.where(inputs != model.generation_config.pad_token_id, inputs, new_pad_token) elif "input_features" in inputs_dict: inputs = inputs_dict["input_features"] else: raise ValueError("No valid generate input found in inputs_dict") generated = model.generate(inputs, **generate_kwargs).numpy() generate_xla = tf.function(model.generate, jit_compile=True) generated_xla = generate_xla(inputs, **generate_kwargs).numpy() # Due to numerical instability, let's fail the test only if there are more than 10% of input sequences give # different outputs between XLA and non-XLA versions. If there are less than 10 examples, let's be strict # and not allow any difference. diff = [[], []] for _generated, _generated_xla in zip(generated.tolist(), generated_xla.tolist()): if _generated != _generated_xla: diff[0].append(_generated) diff[1].append(_generated_xla) ratio = len(diff[0]) / len(generated) if ratio > 0.1 or (len(diff[0]) > 0 and len(generated) < 10): self.assertListEqual(diff[0], diff[1]) for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.eos_token_id = None # Generate until max length config.do_sample = False # fix config for models with additional sequence-length limiting settings for var_name in ["max_position_embeddings", "max_target_positions"]: attr = getattr(config, var_name, None) if attr is not None and attr < generate_kwargs["max_new_tokens"]: try: setattr(config, var_name, generate_kwargs["max_new_tokens"]) except NotImplementedError: # xlnet will raise an exception when trying to set # max_position_embeddings. 
pass model = model_class(config) if model.supports_xla_generation: _generate_and_check_results(model, inputs_dict) else: with self.assertRaises(ValueError): _generate_and_check_results(model, inputs_dict) def test_xla_generate_fast(self): """ Basic quick test for generate-compatible classes that confirms that XLA-generated tokens are the same as their non XLA counterparts. Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception """ self._test_xla_generate(num_beams=1, num_return_sequences=1, max_new_tokens=3) @slow def test_xla_generate_contrastive(self): """ Slow and challenging version of `test_xla_generate_fast` for contrastive search -- contrastive search directly manipulates the model cache and other outputs, and this test ensures that they are in a valid format that is also supported by XLA. Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception """ self._test_xla_generate(num_beams=1, num_return_sequences=1, max_new_tokens=16, penalty_alpha=0.5, top_k=4) @slow def test_xla_generate_slow(self): """ Slow and challenging version of `test_xla_generate_fast` -- this test asks for several long sequences using beam search, with and without XLA. The two outputs should match, and a failure in this test indicates that the model may need further analysis if it is to be used for XLA generation. Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception """ self._test_xla_generate(num_beams=8, num_return_sequences=2, max_new_tokens=128) def _generate_random_bad_tokens(self, num_bad_tokens, model): # special tokens cannot be bad tokens special_tokens = [] if model.config.bos_token_id is not None: special_tokens.append(model.config.bos_token_id) if model.config.pad_token_id is not None: special_tokens.append(model.config.pad_token_id) if model.config.eos_token_id is not None: special_tokens.append(model.config.eos_token_id) # create random bad tokens that are not special tokens bad_tokens = [] while len(bad_tokens) < num_bad_tokens: token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0] if token not in special_tokens: bad_tokens.append(token) return bad_tokens def _check_generated_ids(self, output_ids): for token_id in output_ids[0].numpy().tolist(): self.assertGreaterEqual(token_id, 0) self.assertLess(token_id, self.model_tester.vocab_size) def _check_match_tokens(self, generated_ids, bad_words_ids): # for all bad word tokens for bad_word_ids in bad_words_ids: # for all slices in batch for generated_ids_slice in generated_ids: # for all word idx for i in range(len(bad_word_ids), len(generated_ids_slice)): # if tokens match if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids: return True return False def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32) return output def random_attention_mask(shape, rng=None, name=None, dtype=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None, dtype=dtype) # make sure that at least one token is attended to for each batch attn_mask = tf.concat([attn_mask[:, :-1], tf.ones_like(attn_mask[:, -1:], dtype=dtype)], 
axis=-1) return attn_mask def floats_tensor(shape, scale=1.0, rng=None, name=None, dtype=None): """Creates a random float32 tensor""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return tf.reshape(tf.constant(values, dtype=dtype if dtype is not None else tf.float32), shape=shape)
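A minimal, self-contained sketch of the eager-versus-XLA comparison pattern that `_test_xla_generate` and the `test_xla_generate_*` tests above rely on, reduced here to a toy decoding step (the toy function and tensors are illustrative and not part of the test suite): the same callable is run eagerly and again under `tf.function(jit_compile=True)`, and the two outputs are compared.

import numpy as np
import tensorflow as tf


def toy_greedy_step(logits):
    # one greedy "decoding" step: pick the highest-scoring token id per position
    return tf.argmax(logits, axis=-1, output_type=tf.int32)


logits = tf.constant(np.random.randn(2, 5), dtype=tf.float32)

eager_ids = toy_greedy_step(logits).numpy()
xla_step = tf.function(toy_greedy_step, jit_compile=True)  # same callable, XLA-compiled
xla_ids = xla_step(logits).numpy()

# The real test tolerates small drift on long generations; for a single deterministic
# step the eager and XLA results should match exactly.
assert (eager_ids == xla_ids).all()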
91,082
47.655449
125
py
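The loss-computation test in the file above flips some label positions to -100 and then checks that the loss stays finite. The snippet below is a standalone sketch of that masking convention using plain Keras ops on toy tensors (it is not the library's own loss code): positions labelled -100 are dropped before the per-token losses are averaged.

import tensorflow as tf

logits = tf.random.normal((2, 4, 10))                        # (batch, sequence, vocab)
labels = tf.constant([[1, 2, -100, 3], [4, -100, -100, 5]])  # -100 marks ignored positions

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none")
active = labels != -100
per_token_loss = loss_fn(tf.boolean_mask(labels, active), tf.boolean_mask(logits, active))
loss = tf.reduce_mean(per_token_loss)  # averaged over the real labels only, so it is never NaN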
transformers
transformers-main/tests/test_image_processing_utils.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures") class ImageProcessorUtilTester(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit") # This check we did call the fake head request mock_head.assert_called() def test_legacy_load_from_url(self): # This test is for deprecated behavior and can be removed in v5 _ = ViTImageProcessor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" ) def test_image_processor_from_pretrained_subfolder(self): with self.assertRaises(OSError): # config is in subfolder, the following should not work without specifying the subfolder _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants") config = AutoImageProcessor.from_pretrained( "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor" ) self.assertIsNotNone(config) @is_staging_test class ImageProcessorPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-image-processor") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-image-processor") except HTTPError: pass def test_push_to_hub(self): image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) image_processor.push_to_hub("test-image-processor", use_auth_token=self._token) new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor") for k, v in image_processor.__dict__.items(): self.assertEqual(v, getattr(new_image_processor, k)) # Reset repo delete_repo(token=self._token, repo_id="test-image-processor") # Push 
to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token ) new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor") for k, v in image_processor.__dict__.items(): self.assertEqual(v, getattr(new_image_processor, k)) def test_push_to_hub_in_organization(self): image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token) new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor") for k, v in image_processor.__dict__.items(): self.assertEqual(v, getattr(new_image_processor, k)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-image-processor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token ) new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org") for k, v in image_processor.__dict__.items(): self.assertEqual(v, getattr(new_image_processor, k)) def test_push_to_hub_dynamic_image_processor(self): CustomImageProcessor.register_for_auto_class() image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, ) new_image_processor = AutoImageProcessor.from_pretrained( f"{USER}/test-dynamic-image-processor", trust_remote_code=True ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
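For context on the dynamic-processor test above: after `CustomImageProcessor.register_for_auto_class()` and `push_to_hub`, the saved `preprocessor_config.json` carries an `auto_map` entry, which is what lets `AutoImageProcessor.from_pretrained(..., trust_remote_code=True)` import the custom class from the repo's own `custom_image_processing.py` module. It is shown below as a Python literal, with the processor's other config keys omitted.

# The mapping asserted by the test; an illustrative excerpt of the pushed config.
auto_map = {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}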
6,466
40.722581
131
py
transformers
transformers-main/tests/trainer/test_trainer.py
# coding=utf-8 # Copyright 2018 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import gc import json import math import os import random import re import subprocess import sys import tempfile import time import unittest from itertools import product from pathlib import Path from unittest.mock import Mock, patch import numpy as np from huggingface_hub import HfFolder, Repository, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import ( AutoTokenizer, IntervalStrategy, PretrainedConfig, TrainingArguments, is_torch_available, logging, ) from transformers.hyperparameter_search import ALL_HYPERPARAMETER_SEARCH_BACKENDS from transformers.testing_utils import ( ENDPOINT_STAGING, TOKEN, USER, CaptureLogger, TestCasePlus, execute_subprocess_async, get_gpu_count, get_tests_dir, is_staging_test, require_accelerate, require_intel_extension_for_pytorch, require_optuna, require_ray, require_safetensors, require_sentencepiece, require_sigopt, require_tokenizers, require_torch, require_torch_bf16_cpu, require_torch_bf16_gpu, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, require_torch_tensorrt_fx, require_torch_tf32, require_torch_up_to_2_gpus, require_torchdynamo, require_wandb, slow, ) from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, HPSearchBackend from transformers.training_args import OptimizerNames from transformers.utils import ( SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, is_apex_available, is_bitsandbytes_available, is_safetensors_available, is_torchdistx_available, ) from transformers.utils.hp_naming import TrialShortNamer if is_torch_available(): import torch from torch import nn from torch.utils.data import IterableDataset import transformers.optimization from transformers import ( AutoModelForSequenceClassification, EarlyStoppingCallback, GlueDataset, GlueDataTrainingArguments, GPT2Config, GPT2LMHeadModel, LineByLineTextDataset, PreTrainedModel, Trainer, TrainerState, ) from transformers.modeling_utils import unwrap_model if is_safetensors_available(): import safetensors.torch PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt" class RegressionDataset: def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): np.random.seed(seed) self.label_names = ["labels"] if label_names is None else label_names self.length = length self.x = np.random.normal(size=(length,)).astype(np.float32) self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names] self.ys = [y.astype(np.float32) for y in self.ys] def __len__(self): return self.length def __getitem__(self, i): result = {name: y[i] for name, y in zip(self.label_names, self.ys)} result["input_x"] = self.x[i] return result @dataclasses.dataclass class RegressionTrainingArguments(TrainingArguments): a: float = 0.0 b: float = 0.0 def __post_init__(self): super().__post_init__() # save resources not dealing with reporting (also 
avoids the warning when it's not set) self.report_to = [] class RepeatDataset: def __init__(self, x, length=64): self.x = x self.length = length def __len__(self): return self.length def __getitem__(self, i): return {"input_ids": self.x, "labels": self.x} class DynamicShapesDataset: def __init__(self, length=64, seed=42, batch_size=8): self.length = length np.random.seed(seed) sizes = np.random.randint(1, 20, (length // batch_size,)) # For easy batching, we make every batch_size consecutive samples the same size. self.xs = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)] self.ys = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)] def __len__(self): return self.length def __getitem__(self, i): return {"input_x": self.xs[i], "labels": self.ys[i]} class AlmostAccuracy: def __init__(self, thresh=0.25): self.thresh = thresh def __call__(self, eval_pred): predictions, labels = eval_pred true = np.abs(predictions - labels) <= self.thresh return {"accuracy": true.astype(np.float32).mean().item()} class RegressionModelConfig(PretrainedConfig): def __init__(self, a=0, b=0, double_output=False, random_torch=True, **kwargs): super().__init__(**kwargs) self.a = a self.b = b self.double_output = double_output self.random_torch = random_torch self.hidden_size = 1 if is_torch_available(): class SampleIterableDataset(IterableDataset): def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names) def __iter__(self): for i in range(len(self.dataset)): yield self.dataset[i] class FiniteIterableDataset(SampleIterableDataset): def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): super().__init__(a, b, length, seed, label_names) self.current_sample = 0 def __iter__(self): while self.current_sample < len(self.dataset): yield self.dataset[self.current_sample] self.current_sample += 1 class MultiLoader: def __init__(self, loaders): self.loaders = loaders def __len__(self): return sum(len(loader) for loader in self.loaders) def __iter__(self): for loader in self.loaders: yield from loader class CustomDataloaderTrainer(Trainer): def get_train_dataloader(self): dataloaders = [super().get_train_dataloader(), super().get_train_dataloader()] return MultiLoader(dataloaders) def get_eval_dataloader(self, eval_dataset): dataloaders = [super().get_eval_dataloader(eval_dataset), super().get_eval_dataloader(eval_dataset)] return MultiLoader(dataloaders) class RegressionModel(nn.Module): def __init__(self, a=0, b=0, double_output=False): super().__init__() self.a = nn.Parameter(torch.tensor(a).float()) self.b = nn.Parameter(torch.tensor(b).float()) self.double_output = double_output self.config = None def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if labels is None: return (y, y) if self.double_output else (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y, y) if self.double_output else (loss, y) class RegressionDictModel(nn.Module): def __init__(self, a=0, b=0): super().__init__() self.a = nn.Parameter(torch.tensor(a).float()) self.b = nn.Parameter(torch.tensor(b).float()) self.config = None def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b result = {"output": y} if labels is not None: result["loss"] = nn.functional.mse_loss(y, labels) return result class RegressionPreTrainedModel(PreTrainedModel): config_class = RegressionModelConfig base_model_prefix = "regression" def __init__(self, config): 
super().__init__(config) self.a = nn.Parameter(torch.tensor(config.a).float()) self.b = nn.Parameter(torch.tensor(config.b).float()) self.double_output = config.double_output def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if labels is None: return (y, y) if self.double_output else (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y, y) if self.double_output else (loss, y) class RegressionRandomPreTrainedModel(PreTrainedModel): config_class = RegressionModelConfig base_model_prefix = "regression" def __init__(self, config): super().__init__(config) self.a = nn.Parameter(torch.tensor(config.a).float()) self.b = nn.Parameter(torch.tensor(config.b).float()) self.random_torch = config.random_torch def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if self.random_torch: torch_rand = torch.randn(1).squeeze() np_rand = np.random.rand() rand_rand = random.random() if self.random_torch: y += 0.05 * torch_rand y += 0.05 * torch.tensor(np_rand + rand_rand) if labels is None: return (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y) class TstLayer(nn.Module): def __init__(self, hidden_size): super().__init__() self.linear1 = nn.Linear(hidden_size, hidden_size) self.ln1 = nn.LayerNorm(hidden_size) self.linear2 = nn.Linear(hidden_size, hidden_size) self.ln2 = nn.LayerNorm(hidden_size) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): h = self.ln1(nn.functional.relu(self.linear1(x))) h = nn.functional.relu(self.linear2(x)) return self.ln2(x + h + self.bias) def get_regression_trainer(a=0, b=0, double_output=False, train_len=64, eval_len=64, pretrained=True, **kwargs): label_names = kwargs.get("label_names", None) train_dataset = RegressionDataset(length=train_len, label_names=label_names) eval_dataset = RegressionDataset(length=eval_len, label_names=label_names) model_init = kwargs.pop("model_init", None) if model_init is not None: model = None else: if pretrained: config = RegressionModelConfig(a=a, b=b, double_output=double_output) model = RegressionPreTrainedModel(config) else: model = RegressionModel(a=a, b=b, double_output=double_output) compute_metrics = kwargs.pop("compute_metrics", None) data_collator = kwargs.pop("data_collator", None) optimizers = kwargs.pop("optimizers", (None, None)) output_dir = kwargs.pop("output_dir", "./regression") preprocess_logits_for_metrics = kwargs.pop("preprocess_logits_for_metrics", None) args = RegressionTrainingArguments(output_dir, a=a, b=b, **kwargs) return Trainer( model, args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, optimizers=optimizers, model_init=model_init, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) class TrainerIntegrationCommon: def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True, safe_weights=False): weights_file = WEIGHTS_NAME if not safe_weights else SAFE_WEIGHTS_NAME file_list = [weights_file, "training_args.bin", "optimizer.pt", "scheduler.pt", "trainer_state.json"] if is_pretrained: file_list.append("config.json") for step in range(freq, total, freq): checkpoint = os.path.join(output_dir, f"checkpoint-{step}") self.assertTrue(os.path.isdir(checkpoint)) for filename in file_list: self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename))) def check_best_model_has_been_loaded( self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True, safe_weights=False ): checkpoint = 
os.path.join(output_dir, f"checkpoint-{(total // freq) * freq}") log_history = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json")).log_history values = [d[metric] for d in log_history] best_value = max(values) if greater_is_better else min(values) best_checkpoint = (values.index(best_value) + 1) * freq checkpoint = os.path.join(output_dir, f"checkpoint-{best_checkpoint}") if is_pretrained: best_model = RegressionPreTrainedModel.from_pretrained(checkpoint) best_model.to(trainer.args.device) else: best_model = RegressionModel() if not safe_weights: state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME)) else: state_dict = safetensors.torch.load_file(os.path.join(checkpoint, SAFE_WEIGHTS_NAME)) best_model.load_state_dict(state_dict) best_model.to(trainer.args.device) self.assertTrue(torch.allclose(best_model.a, trainer.model.a)) self.assertTrue(torch.allclose(best_model.b, trainer.model.b)) metrics = trainer.evaluate() self.assertEqual(metrics[metric], best_value) def check_trainer_state_are_the_same(self, trainer_state, trainer_state1): # We'll pop things so operate on copies. state = trainer_state.copy() state1 = trainer_state1.copy() # Log history main contain different logs for the time metrics (after resuming a training). log_history = state.pop("log_history", None) log_history1 = state1.pop("log_history", None) self.assertEqual(state, state1) skip_log_keys = ["train_runtime", "train_samples_per_second", "train_steps_per_second", "train_loss"] for log, log1 in zip(log_history, log_history1): for key in skip_log_keys: _ = log.pop(key, None) _ = log1.pop(key, None) self.assertEqual(log, log1) def convert_to_sharded_checkpoint(self, folder, save_safe=False, load_safe=False): # Converts a checkpoint of a regression model to a sharded checkpoint. 
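# For orientation, an illustrative example of the index file this helper writes for the
# two-parameter regression model used in these tests (default, non-safetensors names):
# {
#   "metadata": {},
#   "weight_map": {
#     "a": "pytorch_model-00001-of-00002.bin",
#     "b": "pytorch_model-00002-of-00002.bin"
#   }
# }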
if load_safe: loader = safetensors.torch.load_file weights_file = os.path.join(folder, SAFE_WEIGHTS_NAME) else: loader = torch.load weights_file = os.path.join(folder, WEIGHTS_NAME) if save_safe: extension = "safetensors" saver = safetensors.torch.save_file index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME) shard_name = SAFE_WEIGHTS_NAME else: extension = "bin" saver = torch.save index_file = os.path.join(folder, WEIGHTS_INDEX_NAME) shard_name = WEIGHTS_NAME state_dict = loader(weights_file) os.remove(weights_file) keys = list(state_dict.keys()) shard_files = [ shard_name.replace(f".{extension}", f"-{idx+1:05d}-of-{len(keys):05d}.{extension}") for idx in range(len(keys)) ] index = {"metadata": {}, "weight_map": {key: shard_files[i] for i, key in enumerate(keys)}} with open(index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) for param_name, shard_file in zip(keys, shard_files): saver({param_name: state_dict[param_name]}, os.path.join(folder, shard_file)) @require_torch @require_sentencepiece @require_tokenizers class TrainerIntegrationPrerunTest(TestCasePlus, TrainerIntegrationCommon): """ Only tests that want to tap into the auto-pre-run 2 trainings: - self.default_trained_model - self.alternate_trained_model directly, or via check_trained_model """ def setUp(self): super().setUp() args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size trainer = get_regression_trainer(learning_rate=0.1) trainer.train() self.default_trained_model = (trainer.model.a, trainer.model.b) trainer = get_regression_trainer(learning_rate=0.1, seed=314) trainer.train() self.alternate_trained_model = (trainer.model.a, trainer.model.b) def check_trained_model(self, model, alternate_seed=False): # Checks a training seeded with learning_rate = 0.1 (a, b) = self.alternate_trained_model if alternate_seed else self.default_trained_model self.assertTrue(torch.allclose(model.a, a)) self.assertTrue(torch.allclose(model.b, b)) def test_reproducible_training(self): # Checks that training worked, model trained and seed made a reproducible training. trainer = get_regression_trainer(learning_rate=0.1) trainer.train() self.check_trained_model(trainer.model) # Checks that a different seed gets different (reproducible) results. trainer = get_regression_trainer(learning_rate=0.1, seed=314) trainer.train() self.check_trained_model(trainer.model, alternate_seed=True) def test_trainer_with_datasets(self): import datasets np.random.seed(42) x = np.random.normal(size=(64,)).astype(np.float32) y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,)) train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y}) # Base training. Should have the same results as test_reproducible_training model = RegressionModel() args = TrainingArguments("./regression", learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) # Can return tensors. 
train_dataset.set_format(type="torch", dtype=torch.float32) model = RegressionModel() trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) # Adding one column not used by the model should have no impact z = np.random.normal(size=(64,)).astype(np.float32) train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y, "extra": z}) model = RegressionModel() trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) def test_model_init(self): train_dataset = RegressionDataset() args = TrainingArguments("./regression", learning_rate=0.1) trainer = Trainer(args=args, train_dataset=train_dataset, model_init=lambda: RegressionModel()) trainer.train() self.check_trained_model(trainer.model) # Re-training should restart from scratch, thus lead the same results. trainer.train() self.check_trained_model(trainer.model) # Re-training should restart from scratch, thus lead the same results and new seed should be used. trainer.args.seed = 314 trainer.train() self.check_trained_model(trainer.model, alternate_seed=True) def test_gradient_accumulation(self): # Training with half the batch size but accumulation steps as 2 should give the same results. trainer = get_regression_trainer( gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1 ) trainer.train() self.check_trained_model(trainer.model) def test_training_loss(self): n_gpus = max(1, get_gpu_count()) # With even logs trainer = get_regression_trainer(logging_steps=64 / (8 * n_gpus)) trainer.train() log_history = trainer.state.log_history losses = [log["loss"] for log in log_history if "loss" in log] train_loss = log_history[-1]["train_loss"] self.assertAlmostEqual(sum(losses) / len(losses), train_loss, places=4) # With uneven logs trainer = get_regression_trainer(logging_steps=5) trainer.train() log_history = trainer.state.log_history # Training loss should be the same as before new_train_loss = log_history[-1]["train_loss"] self.assertAlmostEqual(train_loss, new_train_loss, places=4) def test_custom_optimizer(self): train_dataset = RegressionDataset() args = TrainingArguments("./regression") model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=1.0) lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1.0) trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler)) trainer.train() (a, b) = self.default_trained_model self.assertFalse(torch.allclose(trainer.model.a, a)) self.assertFalse(torch.allclose(trainer.model.b, b)) self.assertEqual(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 1.0) def test_reduce_lr_on_plateau_args(self): # test passed arguments for a custom ReduceLROnPlateau scheduler train_dataset = RegressionDataset(length=64) eval_dataset = RegressionDataset(length=64) args = TrainingArguments( "./regression", evaluation_strategy="epoch", metric_for_best_model="eval_loss", ) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=1.0) lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2, patience=5, cooldown=2) trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, optimizers=(optimizer, lr_scheduler) ) trainer.train() self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) self.assertEqual(trainer.lr_scheduler.factor, 0.2) self.assertEqual(trainer.lr_scheduler.patience, 5) 
self.assertEqual(trainer.lr_scheduler.cooldown, 2) def test_reduce_lr_on_plateau(self): # test the ReduceLROnPlateau scheduler class TrainerWithLRLogs(Trainer): def log(self, logs): # the LR is computed after metrics and does not exist for the first epoch if hasattr(self.lr_scheduler, "_last_lr"): logs["learning_rate"] = self.lr_scheduler._last_lr super().log(logs) train_dataset = RegressionDataset(length=64) eval_dataset = RegressionDataset(length=64) args = TrainingArguments( "./regression", lr_scheduler_type="reduce_lr_on_plateau", evaluation_strategy="epoch", metric_for_best_model="eval_loss", num_train_epochs=10, learning_rate=0.2, ) model = RegressionModel() trainer = TrainerWithLRLogs(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) patience = trainer.lr_scheduler.patience logs = trainer.state.log_history[1:] best_loss = logs[0]["eval_loss"] bad_epochs = 0 for i, log in enumerate(logs[:-1]): # Compare learning rate to next epoch's loss = log["eval_loss"] just_decreased = False if loss > best_loss: bad_epochs += 1 if bad_epochs > patience: self.assertLess(logs[i + 1]["learning_rate"][0], log["learning_rate"][0]) just_decreased = True bad_epochs = 0 else: best_loss = loss bad_epochs = 0 if not just_decreased: self.assertEqual(logs[i + 1]["learning_rate"][0], log["learning_rate"][0]) def test_adafactor_lr_none(self): # test the special case where lr=None, since Trainer can't not have lr_scheduler from transformers.optimization import Adafactor, AdafactorSchedule train_dataset = RegressionDataset() args = TrainingArguments("./regression") model = RegressionModel() optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler)) trainer.train() (a, b) = self.default_trained_model self.assertFalse(torch.allclose(trainer.model.a, a)) self.assertFalse(torch.allclose(trainer.model.b, b)) self.assertGreater(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 0) @require_torch_gpu @require_torch_bf16_gpu def test_mixed_bf16(self): # very basic test trainer = get_regression_trainer(learning_rate=0.1, bf16=True) trainer.train() self.check_trained_model(trainer.model) # --bf16 --half_precision_backend apex can't be used together with self.assertRaises(ValueError): trainer = get_regression_trainer(learning_rate=0.1, bf16=True, half_precision_backend="apex") # will add more specific tests once there are some bugs to fix @require_torch_gpu @require_torch_tf32 def test_tf32(self): # very basic test trainer = get_regression_trainer(learning_rate=0.1, tf32=True) trainer.train() self.check_trained_model(trainer.model) @require_torch @require_sentencepiece @require_tokenizers class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon): def setUp(self): super().setUp() args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_trainer_works_with_dict(self): # Edge case because Apex with mode O2 will change our models to return dicts. This test checks it doesn't break # anything. 
train_dataset = RegressionDataset() eval_dataset = RegressionDataset() model = RegressionDictModel() args = TrainingArguments("./regression") trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() _ = trainer.evaluate() _ = trainer.predict(eval_dataset) def test_evaluation_with_keys_to_drop(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) eval_dataset = RepeatDataset(x) args = TrainingArguments("./test") trainer = Trainer(tiny_gpt2, args, eval_dataset=eval_dataset) # By default the past_key_values are removed result = trainer.predict(eval_dataset) self.assertTrue(isinstance(result.predictions, np.ndarray)) # We can still get them by setting ignore_keys to [] result = trainer.predict(eval_dataset, ignore_keys=[]) self.assertTrue(isinstance(result.predictions, tuple)) self.assertEqual(len(result.predictions), 2) def test_training_arguments_are_left_untouched(self): trainer = get_regression_trainer() trainer.train() args = TrainingArguments("./regression", report_to=[]) dict1, dict2 = args.to_dict(), trainer.args.to_dict() for key in dict1.keys(): # Logging dir can be slightly different as they default to something with the time. if key != "logging_dir": self.assertEqual(dict1[key], dict2[key]) def test_number_of_steps_in_training(self): # Regular training has n_epochs * len(train_dl) steps trainer = get_regression_trainer(learning_rate=0.1) train_output = trainer.train() self.assertEqual(train_output.global_step, self.n_epochs * 64 / self.batch_size) # Check passing num_train_epochs works (and a float version too): trainer = get_regression_trainer(learning_rate=0.1, num_train_epochs=1.5) train_output = trainer.train() self.assertEqual(train_output.global_step, int(1.5 * 64 / self.batch_size)) # If we pass a max_steps, num_train_epochs is ignored trainer = get_regression_trainer(learning_rate=0.1, max_steps=10) train_output = trainer.train() self.assertEqual(train_output.global_step, 10) @require_torch_bf16_cpu @require_intel_extension_for_pytorch def test_number_of_steps_in_training_with_ipex(self): for mix_bf16 in [True, False]: # Regular training has n_epochs * len(train_dl) steps trainer = get_regression_trainer(learning_rate=0.1, use_ipex=True, bf16=mix_bf16, no_cuda=True) train_output = trainer.train() self.assertEqual(train_output.global_step, self.n_epochs * 64 / trainer.args.train_batch_size) # Check passing num_train_epochs works (and a float version too): trainer = get_regression_trainer( learning_rate=0.1, num_train_epochs=1.5, use_ipex=True, bf16=mix_bf16, no_cuda=True ) train_output = trainer.train() self.assertEqual(train_output.global_step, int(1.5 * 64 / trainer.args.train_batch_size)) # If we pass a max_steps, num_train_epochs is ignored trainer = get_regression_trainer( learning_rate=0.1, max_steps=10, use_ipex=True, bf16=mix_bf16, no_cuda=True ) train_output = trainer.train() self.assertEqual(train_output.global_step, 10) def test_logging_inf_nan_filter(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments("./test", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=False) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) trainer.train() log_history_no_filter = trainer.state.log_history # Trainer 
with inf/nan filter args = TrainingArguments("./test", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=True) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) trainer.train() log_history_filter = trainer.state.log_history def is_any_loss_nan_or_inf(log_history): losses = [l["loss"] for l in log_history[:-1]] return any(math.isnan(x) for x in losses) or any(math.isinf(x) for x in losses) self.assertTrue(is_any_loss_nan_or_inf(log_history_no_filter)) self.assertFalse(is_any_loss_nan_or_inf(log_history_filter)) def test_train_and_eval_dataloaders(self): n_gpu = max(1, torch.cuda.device_count()) trainer = get_regression_trainer(learning_rate=0.1, per_device_train_batch_size=16) self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16 * n_gpu) trainer = get_regression_trainer(learning_rate=0.1, per_device_eval_batch_size=16) self.assertEqual(trainer.get_eval_dataloader().total_batch_size, 16 * n_gpu) # Check drop_last works trainer = get_regression_trainer( train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32 ) self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu) + 1) self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu) + 1) trainer = get_regression_trainer( train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32, dataloader_drop_last=True, ) self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu)) self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu)) # Check passing a new dataset for evaluation works new_eval_dataset = RegressionDataset(length=128) self.assertEqual(len(trainer.get_eval_dataloader(new_eval_dataset)), 128 // (32 * n_gpu)) # tests that we do not require dataloader to have a .dataset attribute def test_dataloader_without_dataset(self): train_dataset = RegressionDataset(length=128) trainer = CustomDataloaderTrainer( model=RegressionModel(), train_dataset=train_dataset, eval_dataset=train_dataset ) trainer.train() trainer.evaluate() @require_torch_multi_gpu def test_data_is_not_parallelized_when_model_is_parallel(self): model = RegressionModel() # Make the Trainer believe it's a parallelized model model.is_parallelizable = True model.model_parallel = True args = TrainingArguments("./regression", per_device_train_batch_size=16, per_device_eval_batch_size=16) trainer = Trainer(model, args, train_dataset=RegressionDataset(), eval_dataset=RegressionDataset()) # Check the Trainer was fooled self.assertTrue(trainer.is_model_parallel) self.assertEqual(trainer.args.n_gpu, 1) # The batch size of the training and evaluation dataloaders should be 16, not 16 * n_gpu self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16) self.assertEqual(len(trainer.get_train_dataloader()), 64 // 16) self.assertEqual(trainer.get_eval_dataloader().total_batch_size, 16) self.assertEqual(len(trainer.get_eval_dataloader()), 64 // 16) def test_evaluate(self): trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy()) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, 
compute_metrics=AlmostAccuracy()) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With logits preprocess trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=lambda logits, labels: logits + 1, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) def test_evaluate_with_jit(self): trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), jit_mode_eval=True) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer( a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy(), jit_mode_eval=True ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With logits preprocess trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=lambda logits, labels: logits + 1, jit_mode_eval=True, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) @require_torch_bf16_cpu @require_intel_extension_for_pytorch def test_evaluate_with_ipex(self): for mix_bf16 in [True, False]: trainer = get_regression_trainer( a=1.5, b=2.5, use_ipex=True, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, no_cuda=True ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer( a=1.5, b=2.5, use_ipex=True, eval_len=66, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, no_cuda=True, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With logits preprocess trainer = get_regression_trainer( a=1.5, b=2.5, 
use_ipex=True, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=lambda logits, labels: logits + 1, bf16=mix_bf16, no_cuda=True, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) def test_predict(self): trainer = get_regression_trainer(a=1.5, b=2.5) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With more than one output of the model trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) # With more than one output/label of the model trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"]) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) def test_predict_with_jit(self): trainer = get_regression_trainer(a=1.5, b=2.5, jit_mode_eval=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, jit_mode_eval=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With more than one output of the model trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, jit_mode_eval=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) # With more than one output/label of the model trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"], jit_mode_eval=True ) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) @require_torch_bf16_cpu @require_intel_extension_for_pytorch def test_predict_with_ipex(self): for mix_bf16 in [True, False]: trainer = get_regression_trainer(a=1.5, b=2.5, use_ipex=True, bf16=mix_bf16, no_cuda=True) preds = 
trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, use_ipex=True, bf16=mix_bf16, no_cuda=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With more than one output of the model trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, use_ipex=True, bf16=mix_bf16, no_cuda=True ) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) # With more than one output/label of the model trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"], use_ipex=True, bf16=mix_bf16, no_cuda=True, ) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) def test_dynamic_shapes(self): eval_dataset = DynamicShapesDataset(batch_size=self.batch_size) model = RegressionModel(a=2, b=1) args = TrainingArguments("./regression") trainer = Trainer(model, args, eval_dataset=eval_dataset) # Check evaluation can run to completion _ = trainer.evaluate() # Check predictions preds = trainer.predict(eval_dataset) for expected, seen in zip(eval_dataset.ys, preds.label_ids): self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]])) self.assertTrue(np.all(seen[expected.shape[0] :] == -100)) for expected, seen in zip(eval_dataset.xs, preds.predictions): self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]])) self.assertTrue(np.all(seen[expected.shape[0] :] == -100)) # Same tests with eval accumulation args = TrainingArguments("./regression", eval_accumulation_steps=2) trainer = Trainer(model, args, eval_dataset=eval_dataset) # Check evaluation can run to completion _ = trainer.evaluate() # Check predictions preds = trainer.predict(eval_dataset) for expected, seen in zip(eval_dataset.ys, preds.label_ids): self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]])) self.assertTrue(np.all(seen[expected.shape[0] :] == -100)) for expected, seen in zip(eval_dataset.xs, preds.predictions): self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]])) self.assertTrue(np.all(seen[expected.shape[0] :] == -100)) def test_log_level(self): # testing only --log_level (--log_level_replica requires multiple gpus and DDP and is tested elsewhere) logger = logging.get_logger() log_info_string = "Running training" # test with the default log_level - should be the same as before and thus we test depending on is_info is_info = logging.get_verbosity() <= 20 with CaptureLogger(logger) as cl: trainer = get_regression_trainer() trainer.train() if is_info: self.assertIn(log_info_string, cl.out) else: self.assertNotIn(log_info_string, cl.out) # test with low log_level - lower than info with CaptureLogger(logger) as cl: trainer = get_regression_trainer(log_level="debug") trainer.train() self.assertIn(log_info_string, cl.out) # test with 
high log_level - should be quiet with CaptureLogger(logger) as cl: trainer = get_regression_trainer(log_level="error") trainer.train() self.assertNotIn(log_info_string, cl.out) def test_save_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5) trainer.train() self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size)) # With a regular model that is not a PreTrainedModel with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, pretrained=False) trainer.train() self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False) @require_safetensors def test_safe_checkpoints(self): for save_safetensors in [True, False]: with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, save_safetensors=save_safetensors) trainer.train() self.check_saved_checkpoints( tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), safe_weights=save_safetensors ) # With a regular model that is not a PreTrainedModel with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, save_steps=5, pretrained=False, save_safetensors=save_safetensors ) trainer.train() self.check_saved_checkpoints( tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False, safe_weights=save_safetensors ) @require_torch_multi_gpu def test_run_seq2seq_double_train_wrap_once(self): # test that we don't wrap the model more than once # since wrapping primarily happens on multi-gpu setup we want multiple gpus to test for # example DataParallel(DataParallel(model)) trainer = get_regression_trainer() trainer.train() model_wrapped_before = trainer.model_wrapped trainer.train() model_wrapped_after = trainer.model_wrapped self.assertIs(model_wrapped_before, model_wrapped_after, "should be not wrapped twice") @require_torch_up_to_2_gpus def test_can_resume_training(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled). 
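# A condensed sketch of the save/resume round trip exercised below, using this module's
# get_regression_trainer helper: checkpoints are written to {output_dir}/checkpoint-{global_step},
# and trainer.train(resume_from_checkpoint=...) restores model weights, optimizer, scheduler
# and RNG state from that folder.
import os
import tempfile

with tempfile.TemporaryDirectory() as sketch_dir:
    sketch_trainer = get_regression_trainer(output_dir=sketch_dir, train_len=128, save_steps=5)
    sketch_trainer.train()
    resume_path = os.path.join(sketch_dir, "checkpoint-5")
    # a freshly built trainer picks up exactly where checkpoint-5 left off
    sketch_trainer = get_regression_trainer(output_dir=sketch_dir, train_len=128, save_steps=5)
    sketch_trainer.train(resume_from_checkpoint=resume_path)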
with tempfile.TemporaryDirectory() as tmpdir: kwargs = { "output_dir": tmpdir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "logging_steps": 5, } trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") # Reinitialize trainer trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Now check with a later checkpoint that it also works when we span over one epoch checkpoint = os.path.join(tmpdir, "checkpoint-15") # Reinitialize trainer and load model trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # With a regular model that is not a PreTrainedModel with tempfile.TemporaryDirectory() as tmpdir: kwargs = { "output_dir": tmpdir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "pretrained": False, } trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") # Reinitialize trainer and load model trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Now check with a later checkpoint that it also works when we span over one epoch checkpoint = os.path.join(tmpdir, "checkpoint-15") # Reinitialize trainer and load model trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Now check failures # 1. fail to find a bogus checkpoint trainer = get_regression_trainer() with self.assertRaises(Exception) as context: trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus") self.assertTrue("Can't find a valid checkpoint at" in str(context.exception)) # 2. fail to find any checkpoint - due a fresh output_dir output_dir2 = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer(output_dir=output_dir2) with self.assertRaises(Exception) as context: trainer.train(resume_from_checkpoint=True) self.assertTrue("No valid checkpoint found in output directory" in str(context.exception)) def test_resume_training_with_randomness(self): # For more than 1 GPUs, since the randomness is introduced in the model and with DataParallel (which is used # in this test for more than 2 GPUs), the calls to the torch RNG will happen in a random order (sometimes # GPU 0 will call first and sometimes GPU 1). 
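# The two runs compared in this test must consume identical random streams. Besides the cudnn
# flag set just below, the usual knob is transformers.set_seed, sketched here:
from transformers import set_seed

set_seed(42)  # seeds Python's random, NumPy and torch (CPU and CUDA) in one call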
random_torch = not torch.cuda.is_available() or torch.cuda.device_count() <= 1 if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True train_dataset = RegressionDataset(length=128) eval_dataset = RegressionDataset() with self.subTest("Test every step"): config = RegressionModelConfig(a=0, b=2, random_torch=random_torch) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() model = RegressionRandomPreTrainedModel(config) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, "checkpoint-15")) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() self.assertAlmostEqual(a, a1, delta=1e-5) self.assertAlmostEqual(b, b1, delta=1e-5) with self.subTest("Test every epoch"): config = RegressionModelConfig(a=0, b=2, random_torch=random_torch) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_strategy="epoch", learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() model = RegressionRandomPreTrainedModel(config) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) checkpoints = [d for d in os.listdir(tmp_dir) if d.startswith("checkpoint-")] # There should be one checkpoint per epoch. self.assertEqual(len(checkpoints), 3) checkpoint_dir = sorted(checkpoints, key=lambda x: int(x.replace("checkpoint-", "")))[0] trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, checkpoint_dir)) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() self.assertAlmostEqual(a, a1, delta=1e-5) self.assertAlmostEqual(b, b1, delta=1e-5) @slow @require_accelerate @require_torch_non_multi_gpu def test_auto_batch_size_finder(self): if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True SRC_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), "..", "..", "examples", "pytorch", "text-classification") ) sys.path.append(SRC_DIR) import run_glue with tempfile.TemporaryDirectory() as tmpdir: testargs = f""" run_glue.py --model_name_or_path distilbert-base-uncased --task_name mrpc --do_train --do_eval --max_seq_len 128 --per_device_train_batch_size 4096 --learning_rate 2e-5 --num_train_epochs 1 --output_dir {tmpdir} --auto_find_batch_size 0 """.split() with self.assertRaises(RuntimeError): with patch.object(sys, "argv", testargs): run_glue.main() testargs[-1] = "1" with patch.object(sys, "argv", testargs): run_glue.main() # regression for this issue: https://github.com/huggingface/transformers/issues/12970 def test_training_with_resume_from_checkpoint_false(self): train_dataset = RegressionDataset(length=128) eval_dataset = RegressionDataset() config = RegressionModelConfig(a=0, b=2) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train(resume_from_checkpoint=False) @require_torch_up_to_2_gpus def test_resume_training_with_shard_checkpoint(self): # This test will fail for more 
than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled). with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") self.convert_to_sharded_checkpoint(checkpoint) # Reinitialize trainer trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) @require_safetensors @require_torch_up_to_2_gpus def test_resume_training_with_safe_checkpoint(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled). for initial_safe in [False, True]: for loaded_safe in [False, True]: with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, save_safetensors=initial_safe, ) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") self.convert_to_sharded_checkpoint(checkpoint, load_safe=initial_safe, save_safe=loaded_safe) # Reinitialize trainer trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, save_safetensors=loaded_safe ) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) @require_torch_up_to_2_gpus def test_resume_training_with_gradient_accumulation(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled). 
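# Step bookkeeping behind the values used below (single-GPU case): one optimizer update
# consumes per_device_train_batch_size * gradient_accumulation_steps samples, so global_step
# advances 128 / (4 * 2) = 16 times per epoch and "checkpoint-5" falls inside the first epoch.
train_len, micro_bs, accum_steps = 128, 4, 2
updates_per_epoch = train_len // (micro_bs * accum_steps)
assert updates_per_epoch == 16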
with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, gradient_accumulation_steps=2, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") # Reinitialize trainer trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, gradient_accumulation_steps=2, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) @require_torch_up_to_2_gpus def test_resume_training_with_frozen_params(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled). with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.model.a.requires_grad_(False) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") # Reinitialize trainer trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.model.a.requires_grad_(False) trainer.train(resume_from_checkpoint=checkpoint) self.assertFalse(trainer.model.a.requires_grad) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) def test_load_best_model_at_end(self): total = int(self.n_epochs * 64 / self.batch_size) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, evaluation_strategy="steps", save_steps=5, load_best_model_at_end=True, ) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss") with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, evaluation_strategy="steps", save_steps=5, load_best_model_at_end=True, metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), ) self.assertTrue(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_accuracy", greater_is_better=True) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, evaluation_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), ) self.assertTrue(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 64 // self.batch_size, total) self.check_best_model_has_been_loaded( tmpdir, 64 // self.batch_size, total, trainer, 
"eval_accuracy", greater_is_better=True ) # Test this works with a non PreTrainedModel with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, learning_rate=0.1, eval_steps=5, evaluation_strategy="steps", save_steps=5, load_best_model_at_end=True, pretrained=False, ) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=False) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss", is_pretrained=False) @require_safetensors def test_load_best_model_from_safetensors(self): total = int(self.n_epochs * 64 / self.batch_size) for save_safetensors, pretrained in product([False, True], [False, True]): with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, evaluation_strategy="steps", save_steps=5, load_best_model_at_end=True, save_safetensors=save_safetensors, pretrained=pretrained, ) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=pretrained, safe_weights=save_safetensors) self.check_best_model_has_been_loaded( tmpdir, 5, total, trainer, "eval_loss", is_pretrained=pretrained, safe_weights=save_safetensors ) @slow def test_trainer_eval_mrpc(self): MODEL_ID = "bert-base-cased-finetuned-mrpc" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID) data_args = GlueDataTrainingArguments( task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True ) eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev") training_args = TrainingArguments(output_dir="./examples", no_cuda=True) trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset) result = trainer.evaluate() self.assertLess(result["eval_loss"], 0.2) @slow def test_trainer_eval_lm(self): MODEL_ID = "distilroberta-base" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) dataset = LineByLineTextDataset( tokenizer=tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=tokenizer.max_len_single_sentence, ) self.assertEqual(len(dataset), 31) def test_training_iterable_dataset(self): config = RegressionModelConfig() model = RegressionPreTrainedModel(config) # Adding one column not used by the model should have no impact train_dataset = SampleIterableDataset(label_names=["labels", "extra"]) args = RegressionTrainingArguments(output_dir="./examples", max_steps=4) trainer = Trainer(model=model, args=args, train_dataset=train_dataset) trainer.train() self.assertEqual(trainer.state.global_step, 4) loader = trainer.get_train_dataloader() self.assertIsInstance(loader, torch.utils.data.DataLoader) self.assertIsInstance(loader.sampler, torch.utils.data.dataloader._InfiniteConstantSampler) def test_evaluation_iterable_dataset(self): config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) # Adding one column not used by the model should have no impact eval_dataset = SampleIterableDataset(label_names=["labels", "extra"]) args = RegressionTrainingArguments(output_dir="./examples") trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy()) results = trainer.evaluate() x, y = trainer.eval_dataset.dataset.x, trainer.eval_dataset.dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) 
        expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

        # With a number of elements not a round multiple of the batch size
        eval_dataset = SampleIterableDataset(length=66)
        results = trainer.evaluate(eval_dataset)

        x, y = eval_dataset.dataset.x, eval_dataset.dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

    def test_predict_iterable_dataset(self):
        config = RegressionModelConfig(a=1.5, b=2.5)
        model = RegressionPreTrainedModel(config)
        eval_dataset = SampleIterableDataset()

        args = RegressionTrainingArguments(output_dir="./examples")
        trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy())

        preds = trainer.predict(trainer.eval_dataset).predictions
        x = eval_dataset.dataset.x
        self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

        # With a number of elements not a round multiple of the batch size
        # Adding one column not used by the model should have no impact
        test_dataset = SampleIterableDataset(length=66, label_names=["labels", "extra"])
        preds = trainer.predict(test_dataset).predictions
        x = test_dataset.dataset.x
        self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

    def test_num_train_epochs_in_training(self):
        # len(train_dl) < gradient_accumulation_steps shouldn't give ``ZeroDivisionError`` when ``max_steps`` is given.
        # It should give 1 update step for each epoch.
        trainer = get_regression_trainer(
            max_steps=3, train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5
        )
        train_output = trainer.train()
        self.assertEqual(train_output.global_step, 3)

        # Even when ``max_steps`` is not specified, we still expect 1 update step for each epoch if
        # len(train_dl) < gradient_accumulation_steps.
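# The arithmetic behind that expectation: 64 samples at a per-device batch size of 16 give only
# 4 micro-batches per epoch, fewer than the 5 accumulation steps requested, so the Trainer
# flushes the accumulated gradients once at the end of each epoch and global_step grows by
# exactly one per epoch.
micro_batches_per_epoch = 64 // 16
assert micro_batches_per_epoch < 5  # the accumulation window never fills within an epoch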
trainer = get_regression_trainer(train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5) train_output = trainer.train() self.assertEqual(train_output.global_step, int(self.n_epochs)) def test_early_stopping_callback(self): # early stopping stops training before num_training_epochs with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, num_train_epochs=20, gradient_accumulation_steps=1, per_device_train_batch_size=16, load_best_model_at_end=True, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, compute_metrics=AlmostAccuracy(), metric_for_best_model="accuracy", ) trainer.add_callback(EarlyStoppingCallback(1, 0.0001)) train_output = trainer.train() self.assertLess(train_output.global_step, 20 * 64 / 16) # Invalid inputs to trainer with early stopping callback result in assertion error with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, num_train_epochs=20, gradient_accumulation_steps=1, per_device_train_batch_size=16, evaluation_strategy=IntervalStrategy.EPOCH, compute_metrics=AlmostAccuracy(), metric_for_best_model="accuracy", ) trainer.add_callback(EarlyStoppingCallback(1)) self.assertEqual(trainer.state.global_step, 0) try: trainer.train() except AssertionError: self.assertEqual(trainer.state.global_step, 0) def test_flos_extraction(self): trainer = get_regression_trainer(learning_rate=0.1) def assert_flos_extraction(trainer, wrapped_model_to_check): self.assertEqual(trainer.model, unwrap_model(wrapped_model_to_check)) self.assertGreaterEqual(getattr(unwrap_model(wrapped_model_to_check).config, "total_flos", 0), 0) # with plain model assert_flos_extraction(trainer, trainer.model) # with enforced DataParallel assert_flos_extraction(trainer, nn.DataParallel(trainer.model)) trainer.train() self.assertTrue(isinstance(trainer.state.total_flos, float)) def check_checkpoint_deletion(self, trainer, output_dir, expected): # Make fake checkpoints for n in [5, 10, 15, 20, 25]: os.makedirs(os.path.join(output_dir, f"{PREFIX_CHECKPOINT_DIR}-{n}"), exist_ok=True) trainer._rotate_checkpoints(output_dir=output_dir) glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{PREFIX_CHECKPOINT_DIR}-*")] values = [int(re.match(f".*{PREFIX_CHECKPOINT_DIR}-([0-9]+)", d).groups()[0]) for d in glob_checkpoints] self.assertSetEqual(set(values), set(expected)) def test_checkpoint_rotation(self): with tempfile.TemporaryDirectory() as tmp_dir: # Without best model at end trainer = get_regression_trainer(output_dir=tmp_dir, save_total_limit=2) self.check_checkpoint_deletion(trainer, tmp_dir, [20, 25]) # With best model at end trainer = get_regression_trainer( output_dir=tmp_dir, evaluation_strategy="steps", load_best_model_at_end=True, save_total_limit=2 ) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5") self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25]) # Edge case: we don't always honor save_total_limit=1 if load_best_model_at_end=True to be able to resume # from checkpoint trainer = get_regression_trainer( output_dir=tmp_dir, evaluation_strategy="steps", load_best_model_at_end=True, save_total_limit=1 ) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-25") self.check_checkpoint_deletion(trainer, tmp_dir, [25]) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5") self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25]) def check_mem_metrics(self, trainer, check_func): metrics = 
trainer.train().metrics check_func("init_mem_cpu_alloc_delta", metrics) check_func("train_mem_cpu_alloc_delta", metrics) if torch.cuda.device_count() > 0: check_func("init_mem_gpu_alloc_delta", metrics) check_func("train_mem_gpu_alloc_delta", metrics) metrics = trainer.evaluate() check_func("eval_mem_cpu_alloc_delta", metrics) if torch.cuda.device_count() > 0: check_func("eval_mem_gpu_alloc_delta", metrics) metrics = trainer.predict(RegressionDataset()).metrics check_func("test_mem_cpu_alloc_delta", metrics) if torch.cuda.device_count() > 0: check_func("test_mem_gpu_alloc_delta", metrics) def test_mem_metrics(self): # with mem metrics enabled trainer = get_regression_trainer(skip_memory_metrics=False) self.check_mem_metrics(trainer, self.assertIn) # with mem metrics disabled trainer = get_regression_trainer(skip_memory_metrics=True) self.check_mem_metrics(trainer, self.assertNotIn) @require_torch_gpu def test_fp16_full_eval(self): # this is a sensitive test so let's keep debugging printouts in place for quick diagnosis. # it's using pretty large safety margins, but small enough to detect broken functionality. debug = 0 n_gpus = get_gpu_count() bs = 8 eval_len = 16 * n_gpus # make the params somewhat big so that there will be enough RAM consumed to be able to # measure things. We should get about 64KB for a+b in fp32 a = torch.ones(1000, bs) + 0.001 b = torch.ones(1000, bs) - 0.001 # 1. with fp16_full_eval disabled trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False) metrics = trainer.evaluate() del trainer gc.collect() fp32_init = metrics["init_mem_gpu_alloc_delta"] fp32_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"fp32_init {fp32_init}") print(f"fp32_eval {fp32_eval}") # here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram. # perfect world: fp32_init == 64<<10 self.assertGreater(fp32_init, 59_000) # after eval should be no extra memory allocated - with a small margin (other than the peak # memory consumption for the forward calculation that gets recovered) # perfect world: fp32_eval == close to zero self.assertLess(fp32_eval, 5_000) # 2. with fp16_full_eval enabled trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, fp16_full_eval=True, skip_memory_metrics=False) metrics = trainer.evaluate() fp16_init = metrics["init_mem_gpu_alloc_delta"] fp16_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"fp16_init {fp16_init}") print(f"fp16_eval {fp16_eval}") # here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0 # perfect world: fp16_init == close to zero self.assertLess(fp16_init, 5_000) # here we put the model on device in eval and only `half()` of it, i.e. about 32K,(again we ignore the peak margin which gets returned back) # perfect world: fp32_init == 32<<10 self.assertGreater(fp16_eval, 27_000) # 3. relative comparison fp32 vs full fp16 # should be about half of fp16_init # perfect world: fp32_init/2 == fp16_eval self.assertAlmostEqual(fp16_eval, fp32_init / 2, delta=5_000) @require_torch_non_multi_gpu @require_torchdynamo @require_torch_tensorrt_fx def test_torchdynamo_full_eval(self): import torchdynamo # torchdynamo at the moment doesn't support DP/DDP, therefore require a single gpu n_gpus = get_gpu_count() bs = 8 eval_len = 16 * n_gpus # make the params are somewhat big so that there will be enough RAM consumed to be able to # measure things. 
We should get about 64KB for a+b in fp32 a = torch.ones(1000, bs) + 0.001 b = torch.ones(1000, bs) - 0.001 # 1. Default - without TorchDynamo trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len) metrics = trainer.evaluate() original_eval_loss = metrics["eval_loss"] del trainer # 2. TorchDynamo eager trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="eager") metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) del trainer torchdynamo.reset() # 3. TorchDynamo nvfuser trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="nvfuser") metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) torchdynamo.reset() # 4. TorchDynamo fx2trt trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="fx2trt") metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) torchdynamo.reset() @unittest.skip("torch 2.0.0 gives `ModuleNotFoundError: No module named 'torchdynamo'`.") @require_torch_non_multi_gpu @require_torchdynamo def test_torchdynamo_memory(self): # torchdynamo at the moment doesn't support DP/DDP, therefore require a single gpu import torchdynamo class CustomTrainer(Trainer): def compute_loss(self, model, inputs, return_outputs=False): x = inputs["x"] output = model(x) if self.args.n_gpu == 1: return output.mean() return output class MyModule(torch.nn.Module): """Simple module that does aggressive fusion""" def __init__(self): super().__init__() def forward(self, x): for _ in range(20): x = torch.cos(x) return x mod = MyModule() # 1. without TorchDynamo (eager baseline) a = torch.ones(1024, 1024, device="cuda", requires_grad=True) a.grad = None trainer = CustomTrainer(model=mod) # warmup for _ in range(10): orig_loss = trainer.training_step(mod, {"x": a}) # resets gc.collect() torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() orig_loss = trainer.training_step(mod, {"x": a}) orig_peak_mem = torch.cuda.max_memory_allocated() torchdynamo.reset() del trainer # 2. TorchDynamo nvfuser a = torch.ones(1024, 1024, device="cuda", requires_grad=True) a.grad = None args = TrainingArguments(output_dir="None", torchdynamo="nvfuser") trainer = CustomTrainer(model=mod, args=args) # warmup for _ in range(10): loss = trainer.training_step(mod, {"x": a}) # resets gc.collect() torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() loss = trainer.training_step(mod, {"x": a}) peak_mem = torch.cuda.max_memory_allocated() torchdynamo.reset() del trainer # Functional check self.assertAlmostEqual(loss, orig_loss) # AOT Autograd recomputaion and nvfuser recomputation optimization # aggressively fuses the operations and reduce the memory footprint. self.assertGreater(orig_peak_mem, peak_mem * 2) @require_torch_gpu @require_torch_bf16_gpu def test_bf16_full_eval(self): # note: most of the logic is the same as test_fp16_full_eval # this is a sensitive test so let's keep debugging printouts in place for quick diagnosis. # it's using pretty large safety margins, but small enough to detect broken functionality. debug = 0 n_gpus = get_gpu_count() bs = 8 eval_len = 16 * n_gpus # make the params somewhat big so that there will be enough RAM consumed to be able to # measure things. We should get about 64KB for a+b in fp32 a = torch.ones(1000, bs) + 0.001 b = torch.ones(1000, bs) - 0.001 # 1. 
with bf16_full_eval disabled trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False) metrics = trainer.evaluate() del trainer gc.collect() fp32_init = metrics["init_mem_gpu_alloc_delta"] fp32_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"fp32_init {fp32_init}") print(f"fp32_eval {fp32_eval}") # here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram. # perfect world: fp32_init == 64<<10 self.assertGreater(fp32_init, 59_000) # after eval should be no extra memory allocated - with a small margin (other than the peak # memory consumption for the forward calculation that gets recovered) # perfect world: fp32_eval == close to zero self.assertLess(fp32_eval, 5_000) # 2. with bf16_full_eval enabled trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, bf16_full_eval=True, skip_memory_metrics=False) metrics = trainer.evaluate() bf16_init = metrics["init_mem_gpu_alloc_delta"] bf16_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"bf16_init {bf16_init}") print(f"bf16_eval {bf16_eval}") # here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0 # perfect world: bf16_init == close to zero self.assertLess(bf16_init, 5_000) # here we put the model on device in eval and only `half()` of it, i.e. about 32K,(again we ignore the peak margin which gets returned back) # perfect world: fp32_init == 32<<10 self.assertGreater(bf16_eval, 27_000) # 3. relative comparison fp32 vs full bf16 # should be about half of bf16_init # perfect world: fp32_init/2 == bf16_eval self.assertAlmostEqual(bf16_eval, fp32_init / 2, delta=5_000) def test_no_wd_param_group(self): model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)])) trainer = Trainer(model=model) trainer.create_optimizer_and_scheduler(10) # fmt: off wd_names = ['0.linear1.weight', '0.linear2.weight', '1.0.linear1.weight', '1.0.linear2.weight', '1.1.linear1.weight', '1.1.linear2.weight'] # fmt: on wd_params = [p for n, p in model.named_parameters() if n in wd_names] no_wd_params = [p for n, p in model.named_parameters() if n not in wd_names] self.assertListEqual(trainer.optimizer.param_groups[0]["params"], wd_params) self.assertListEqual(trainer.optimizer.param_groups[1]["params"], no_wd_params) @slow @require_torch_multi_gpu def test_end_to_end_example(self): # Tests that `translation.py` will run without issues script_path = os.path.abspath( os.path.join( os.path.dirname(__file__), "..", "..", "examples", "pytorch", "translation", "run_translation.py" ) ) with tempfile.TemporaryDirectory() as tmpdir: command = [ "accelerate", "launch", script_path, "--model_name_or_path", "t5-small", "--per_device_train_batch_size", "1", "--output_dir", tmpdir, "--overwrite_output_dir", "--do_train", "--max_train_samples", "64", "--num_train_epochs", "1", "--dataset_name", "wmt16", "--dataset_config", "ro-en", "--source_lang", "en", "--target_lang", "ro", "--do_predict", "--max_predict_samples", "64", "--predict_with_generate", "--ddp_timeout", "60", ] execute_subprocess_async(command) # successful return here == success - any errors would have caused an error or a timeout in the sub-call @require_torch @is_staging_test class TrainerIntegrationWithHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): for model in ["test-trainer", "test-trainer-epoch", "test-trainer-step"]: try: delete_repo(token=cls._token, 
repo_id=model) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-trainer-org") except HTTPError: pass def test_push_to_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer"), push_to_hub=True, hub_token=self._token, ) url = trainer.push_to_hub() # Extract repo_name from the url re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url) self.assertTrue(re_search is not None) repo_name = re_search.groups()[0] self.assertEqual(repo_name, f"{USER}/test-trainer") model = RegressionPreTrainedModel.from_pretrained(repo_name) self.assertEqual(model.a.item(), trainer.model.a.item()) self.assertEqual(model.b.item(), trainer.model.b.item()) def test_push_to_hub_in_organization(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(output_dir=tmp_dir) trainer.save_model() trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer-org"), push_to_hub=True, hub_model_id="valid_org/test-trainer-org", hub_token=self._token, ) url = trainer.push_to_hub() # Extract repo_name from the url re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url) self.assertTrue(re_search is not None) repo_name = re_search.groups()[0] self.assertEqual(repo_name, "valid_org/test-trainer-org") model = RegressionPreTrainedModel.from_pretrained("valid_org/test-trainer-org") self.assertEqual(model.a.item(), trainer.model.a.item()) self.assertEqual(model.b.item(), trainer.model.b.item()) def get_commit_history(self, repo): commit_logs = subprocess.run( "git log".split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE, check=True, encoding="utf-8", cwd=repo, ).stdout commits = commit_logs.split("\n\n")[1::2] return [commit.strip() for commit in commits] def test_push_to_hub_with_saves_each_epoch(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer-epoch"), push_to_hub=True, hub_token=self._token, save_strategy="epoch", ) trainer.train() # Wait for the async pushes to be finished while trainer.push_in_progress is not None and not trainer.push_in_progress.is_done: time.sleep(0.5) with tempfile.TemporaryDirectory() as tmp_dir: _ = Repository(tmp_dir, clone_from=f"{USER}/test-trainer-epoch", token=self._token) commits = self.get_commit_history(tmp_dir) self.assertIn("initial commit", commits) # We can't test that epoch 2 and 3 are in the commits without being flaky as those might be skipped if # the push for epoch 1 wasn't finished at the time. self.assertIn("Training in progress, epoch 1", commits) def test_push_to_hub_with_saves_each_n_steps(self): num_gpus = max(1, get_gpu_count()) if num_gpus > 2: return with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer-step"), push_to_hub=True, hub_token=self._token, save_strategy="steps", save_steps=5, ) trainer.train() # Wait for the async pushes to be finished while trainer.push_in_progress is not None and not trainer.push_in_progress.is_done: time.sleep(0.5) with tempfile.TemporaryDirectory() as tmp_dir: _ = Repository(tmp_dir, clone_from=f"{USER}/test-trainer-step", token=self._token) commits = self.get_commit_history(tmp_dir) self.assertIn("initial commit", commits) # We can't test that epoch 2 and 3 are in the commits without being flaky as those might be skipped if # the push for epoch 1 wasn't finished at the time. 
self.assertIn("Training in progress, step 5", commits) @require_torch @require_optuna class TrainerHyperParameterOptunaIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return {} def model_init(trial): if trial is not None: a = trial.suggest_int("a", -4, 4) b = trial.suggest_int("b", -4, 4) else: a = 0 b = 0 config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(config) def hp_name(trial): return MyTrialShortNamer.shortname(trial.params) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search(direction="minimize", hp_space=hp_space, hp_name=hp_name, n_trials=4) @require_torch @require_ray class TrainerHyperParameterRayIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def ray_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): from ray import tune return { "a": tune.randint(-4, 4), "b": tune.randint(-4, 4), } def model_init(config): if config is None: a = 0 b = 0 else: a = config["a"] b = config["b"] model_config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(model_config) def hp_name(params): return MyTrialShortNamer.shortname(params) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="ray", n_trials=4 ) def test_hyperparameter_search(self): self.ray_hyperparameter_search() def test_hyperparameter_search_ray_client(self): import ray from ray.util.client.ray_client_helpers import ray_start_client_server with ray_start_client_server(): assert ray.util.client.ray.is_connected() self.ray_hyperparameter_search() @slow @require_torch @require_sigopt class TrainerHyperParameterSigOptIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return [ {"bounds": {"min": -4, "max": 4}, "name": "a", "type": "int"}, {"bounds": {"min": -4, "max": 4}, "name": "b", "type": "int"}, ] def model_init(trial): if trial is not None: a = trial.assignments["a"] b = trial.assignments["b"] else: a = 0 b = 0 config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(config) def hp_name(trial): return MyTrialShortNamer.shortname(trial.assignments) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, 
learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="sigopt", n_trials=4 ) optim_test_params = [] if is_torch_available(): default_adam_kwargs = { "betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2), "eps": TrainingArguments.adam_epsilon, "lr": TrainingArguments.learning_rate, } default_lion_kwargs = { "betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2), "lr": TrainingArguments.learning_rate, } default_anyprecision_kwargs = { "use_kahan_summation": False, "momentum_dtype": torch.float32, "variance_dtype": torch.float32, "compensation_buffer_dtype": torch.bfloat16, } optim_test_params = [ ( TrainingArguments(optim=OptimizerNames.ADAMW_HF, output_dir="None"), transformers.optimization.AdamW, default_adam_kwargs, ), ( TrainingArguments(optim=OptimizerNames.ADAMW_HF.value, output_dir="None"), transformers.optimization.AdamW, default_adam_kwargs, ), ( TrainingArguments(optim=OptimizerNames.ADAMW_TORCH, output_dir="None"), torch.optim.AdamW, default_adam_kwargs, ), ( TrainingArguments(optim=OptimizerNames.ADAFACTOR, output_dir="None"), transformers.optimization.Adafactor, { "scale_parameter": False, "relative_step": False, "lr": TrainingArguments.learning_rate, }, ), ] if is_apex_available(): import apex optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None"), apex.optimizers.FusedAdam, default_adam_kwargs, ) ) if is_bitsandbytes_available(): import bitsandbytes as bnb optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None"), bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_8BIT, output_dir="None"), bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None"), bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None"), bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.LION, output_dir="None"), bnb.optim.Lion, default_lion_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.LION_8BIT, output_dir="None"), bnb.optim.Lion, default_lion_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None"), bnb.optim.Lion, default_lion_kwargs, ) ) if is_torchdistx_available(): import torchdistx optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None"), torchdistx.optimizers.AnyPrecisionAdamW, dict(default_adam_kwargs, **default_anyprecision_kwargs), ) ) @require_torch class TrainerOptimizerChoiceTest(unittest.TestCase): def check_optim_and_kwargs(self, training_args: TrainingArguments, expected_cls, expected_kwargs): actual_cls, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args) self.assertEqual(expected_cls, actual_cls) self.assertIsNotNone(optim_kwargs) for p, v in expected_kwargs.items(): self.assertTrue(p in optim_kwargs) actual_v = optim_kwargs[p] self.assertTrue(actual_v == v, f"Failed check for {p}. 
Expected {v}, but got {actual_v}.") @parameterized.expand(optim_test_params, skip_on_empty=True) def test_optim_supported(self, training_args: TrainingArguments, expected_cls, expected_kwargs): # exercises all the valid --optim options self.check_optim_and_kwargs(training_args, expected_cls, expected_kwargs) trainer = get_regression_trainer(**training_args.to_dict()) trainer.train() def test_fused_adam(self): # Pretend that apex is installed and mock apex.optimizers.FusedAdam exists. # Trainer.get_optimizer_cls_and_kwargs does not use FusedAdam. It only has to return the # class given, so mocking apex.optimizers.FusedAdam should be fine for testing and allow # the test to run without requiring an apex installation. mock = Mock() modules = { "apex": mock, "apex.optimizers": mock.optimizers, "apex.optimizers.FusedAdam": mock.optimizers.FusedAdam, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None"), mock.optimizers.FusedAdam, default_adam_kwargs, ) def test_fused_adam_no_apex(self): args = TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None") # Pretend that apex does not exist, even if installed. By setting apex to None, importing # apex will fail even if apex is installed. with patch.dict("sys.modules", {"apex.optimizers": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_adam8bit(self): # Pretend that Bits and Bytes is installed and mock bnb.optim.Adam8bit exists. # Trainer.get_optimizer_cls_and_kwargs does not use Adam8bit. It only has to return the # class given, so mocking bnb.optim.Adam8bit should be fine for testing and allow # the test to run without requiring a bnb installation. 
mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None"), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_paged_adam8bit_alias(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_8BIT, output_dir="None"), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_paged_adam(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None"), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_paged_adam8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None"), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_lion(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.LION, output_dir="None"), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_lion8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.LION_8BIT, output_dir="None"), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_paged_lion8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None"), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_paged_lion(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_LION, output_dir="None"), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_adam8bit_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None") # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if bnb is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_adam_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None") # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if bnb is installed. 
with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_adam8bit_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None") # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if bnb is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_lion_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.PAGED_LION, output_dir="None") # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if bnb is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_lion8bit_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None") # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if bnb is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_anyprecision_adamw(self): # Pretend that torchdistx is installed and mock torchdistx.optimizers.AnyPrecisionAdamW exists. # Trainer.get_optimizer_cls_and_kwargs does not use AnyPrecisioinAdamW. It only has to return the # class given, so mocking torchdistx.optimizers.AnyPrecisionAdamW should be fine for testing and allow # the test to run without requiring a bnb installation. mock = Mock() modules = { "torchdistx": mock, "torchdistx.optimizers": mock.optimizers, "torchdistx.optimizers.AnyPrecisionAdamW.": mock.optimizers.AnyPrecisionAdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None"), mock.optimizers.AnyPrecisionAdamW, dict(default_adam_kwargs, **default_anyprecision_kwargs), ) def test_no_torchdistx_anyprecision_adamw(self): args = TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None") # Pretend that torchdistx does not exist, even if installed. By setting torchdistx to None, importing # torchdistx.optimizers will fail even if torchdistx is installed. 
with patch.dict("sys.modules", {"torchdistx.optimizers": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) @require_torch @require_wandb class TrainerHyperParameterWandbIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return { "method": "random", "metric": {}, "parameters": { "a": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, "b": {"distribution": "int_uniform", "min": 1, "max": 6}, }, } def model_init(config): if config is None: a = 0 b = 0 else: a = config["a"] b = config["b"] model_config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(model_config) def hp_name(params): return MyTrialShortNamer.shortname(params) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="wandb", n_trials=4, anonymous="must" ) class HyperParameterSearchBackendsTest(unittest.TestCase): def test_hyperparameter_search_backends(self): self.assertEqual( list(ALL_HYPERPARAMETER_SEARCH_BACKENDS.keys()), list(HPSearchBackend), )
118,879
40.551905
148
py
transformers
transformers-main/tests/trainer/test_trainer_distributed.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, require_torch_npu, ) from transformers.training_args import ParallelMode from transformers.utils import logging logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class DummyDataset(Dataset): def __init__(self, length: int = 101): self.length = length def __len__(self): return self.length def __getitem__(self, i) -> int: return i class DummyDataCollator: def __call__(self, features): return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)} class DummyModel(nn.Module): def __init__(self): super().__init__() # Add some (unused) params otherwise DDP will complain. self.fc = nn.Linear(120, 80) def forward(self, input_ids, labels=None): if labels is not None: return torch.tensor(0.0, device=input_ids.device), input_ids else: return input_ids class TestTrainerDistributedNeuronCore(TestCasePlus): @require_torch_neuroncore def test_trainer(self): distributed_args = f"""--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = ["torchrun"] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call class TestTrainerDistributedNPU(TestCasePlus): @require_torch_npu def test_trainer(self): distributed_args = f"""--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = ["torchrun"] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call class TestTrainerDistributed(TestCasePlus): @require_torch_multi_gpu def test_trainer(self): distributed_args = f"""--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = ["torchrun"] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 
--output_dir output_dir ./tests/test_trainer_distributed.py parser = HfArgumentParser((TrainingArguments,)) training_args = parser.parse_args_into_dataclasses()[0] logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. (this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: dataset = DummyDataset(dataset_length) def compute_metrics(p: EvalPrediction) -> Dict: sequential = list(range(len(dataset))) success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" ) return {"success": success} trainer = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = 2 metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = None
6,342
36.093567
133
py
transformers
transformers-main/tests/trainer/test_trainer_callback.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class MyTestTrainerCallback(TrainerCallback): "A callback that registers the events that go through." def __init__(self): self.events = [] def on_init_end(self, args, state, control, **kwargs): self.events.append("on_init_end") def on_train_begin(self, args, state, control, **kwargs): self.events.append("on_train_begin") def on_train_end(self, args, state, control, **kwargs): self.events.append("on_train_end") def on_epoch_begin(self, args, state, control, **kwargs): self.events.append("on_epoch_begin") def on_epoch_end(self, args, state, control, **kwargs): self.events.append("on_epoch_end") def on_step_begin(self, args, state, control, **kwargs): self.events.append("on_step_begin") def on_step_end(self, args, state, control, **kwargs): self.events.append("on_step_end") def on_evaluate(self, args, state, control, **kwargs): self.events.append("on_evaluate") def on_predict(self, args, state, control, **kwargs): self.events.append("on_predict") def on_save(self, args, state, control, **kwargs): self.events.append("on_save") def on_log(self, args, state, control, **kwargs): self.events.append("on_log") def on_prediction_step(self, args, state, control, **kwargs): self.events.append("on_prediction_step") @require_torch class TrainerCallbackTest(unittest.TestCase): def setUp(self): self.output_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.output_dir) def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # it's set to False since the tests later on depend on its value.
train_dataset = RegressionDataset(length=train_len) eval_dataset = RegressionDataset(length=eval_len) config = RegressionModelConfig(a=a, b=b) model = RegressionPreTrainedModel(config) args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs) return Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks, ) def check_callbacks_equality(self, cbs1, cbs2): self.assertEqual(len(cbs1), len(cbs2)) # Order doesn't matter cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__) cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__) for cb1, cb2 in zip(cbs1, cbs2): if isinstance(cb1, type) and isinstance(cb2, type): self.assertEqual(cb1, cb2) elif isinstance(cb1, type) and not isinstance(cb2, type): self.assertEqual(cb1, cb2.__class__) elif not isinstance(cb1, type) and isinstance(cb2, type): self.assertEqual(cb1.__class__, cb2) else: self.assertEqual(cb1, cb2) def get_expected_events(self, trainer): expected_events = ["on_init_end", "on_train_begin"] step = 0 train_dl_len = len(trainer.get_eval_dataloader()) evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"] for _ in range(trainer.state.num_train_epochs): expected_events.append("on_epoch_begin") for _ in range(train_dl_len): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("on_log") if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("on_save") expected_events.append("on_epoch_end") if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def test_init_callback(self): trainer = self.get_trainer() expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) # Callbacks passed at init are added to the default callbacks trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) expected_callbacks.append(MyTestTrainerCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback trainer = self.get_trainer(disable_tqdm=True) expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) def test_add_remove_callback(self): expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback] trainer = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(DefaultFlowCallback) expected_callbacks.remove(DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer = self.get_trainer() cb = trainer.pop_callback(DefaultFlowCallback) self.assertEqual(cb.__class__, DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer.add_callback(DefaultFlowCallback) expected_callbacks.insert(0, DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) # We can also add, pop, or remove by instance trainer = self.get_trainer() 
cb = trainer.callback_handler.callbacks[0] trainer.remove_callback(cb) expected_callbacks.remove(DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer = self.get_trainer() cb1 = trainer.callback_handler.callbacks[0] cb2 = trainer.pop_callback(cb1) self.assertEqual(cb1, cb2) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer.add_callback(cb1) expected_callbacks.insert(0, DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) def test_event_flow(self): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="ignore", category=UserWarning) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) # Independent log/save/eval trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps") trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch") trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) # A bit of everything trainer = self.get_trainer( callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps", ) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) # warning should be emitted for duplicated callbacks with patch("transformers.trainer_callback.logger.warning") as warn_mock: trainer = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], ) assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
10,273
40.764228
118
py
transformers
transformers-main/tests/trainer/test_trainer_utils.py
# coding=utf-8 # Copyright 2018 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import unittest import numpy as np from transformers.data.data_collator import default_data_collator from transformers.testing_utils import require_accelerate, require_torch from transformers.trainer_utils import RemoveColumnsCollator, find_executable_batch_size from transformers.utils import is_torch_available if is_torch_available(): import torch from torch import nn from torch.utils.data import IterableDataset from transformers.modeling_outputs import SequenceClassifierOutput from transformers.tokenization_utils_base import BatchEncoding from transformers.trainer_pt_utils import ( DistributedLengthGroupedSampler, DistributedSamplerWithLoop, DistributedTensorGatherer, IterableDatasetShard, LabelSmoother, LengthGroupedSampler, SequentialDistributedSampler, ShardSampler, get_parameter_names, numpy_pad_and_concatenate, torch_pad_and_concatenate, ) class TstLayer(nn.Module): def __init__(self, hidden_size): super().__init__() self.linear1 = nn.Linear(hidden_size, hidden_size) self.ln1 = nn.LayerNorm(hidden_size) self.linear2 = nn.Linear(hidden_size, hidden_size) self.ln2 = nn.LayerNorm(hidden_size) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): h = self.ln1(nn.functional.relu(self.linear1(x))) h = nn.functional.relu(self.linear2(x)) return self.ln2(x + h + self.bias) class RandomIterableDataset(IterableDataset): # For testing, an iterable dataset of random length def __init__(self, p_stop=0.01, max_length=1000): self.p_stop = p_stop self.max_length = max_length self.generator = torch.Generator() def __iter__(self): count = 0 stop = False while not stop and count < self.max_length: yield count count += 1 number = torch.rand(1, generator=self.generator).item() stop = number < self.p_stop @require_torch class TrainerUtilsTest(unittest.TestCase): def test_distributed_tensor_gatherer(self): # Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1 world_size = 4 num_samples = 21 input_indices = [ [0, 1, 6, 7, 12, 13, 18, 19], [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1], [5, 11, 17, 2], ] predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices in input_indices: gatherer.add_arrays(predictions[indices]) result = gatherer.finalize() self.assertTrue(np.array_equal(result, predictions)) # With nested tensors gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices in input_indices: gatherer.add_arrays([predictions[indices], [predictions[indices], predictions[indices]]]) result = gatherer.finalize() self.assertTrue(isinstance(result, list)) self.assertEqual(len(result), 2) self.assertTrue(isinstance(result[1], list)) self.assertEqual(len(result[1]), 2) self.assertTrue(np.array_equal(result[0], predictions)) self.assertTrue(np.array_equal(result[1][0], predictions)) self.assertTrue(np.array_equal(result[1][1], 
predictions)) def test_distributed_tensor_gatherer_different_shapes(self): # Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1 world_size = 4 num_samples = 21 input_indices = [ [0, 1, 6, 7, 12, 13, 18, 19], [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1], [5, 11, 17, 2], ] sequence_lengths = [8, 10, 13] predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays(predictions[indices, :seq_length]) result = gatherer.finalize() # Remove the extra samples added at the end for a round multiple of num processes. actual_indices = [input_indices[0], input_indices[1][:-2], input_indices[2][:-1]] for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[indices, :seq_length], predictions[indices, :seq_length])) # With nested tensors predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays([predictions[indices, :seq_length], predictions[indices]]) result = gatherer.finalize() for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[0][indices, :seq_length], predictions[indices, :seq_length])) self.assertTrue(np.array_equal(result[1], predictions)) # Check if works if varying seq_length is second gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays([predictions[indices], predictions[indices, :seq_length]]) result = gatherer.finalize() self.assertTrue(np.array_equal(result[0], predictions)) for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[1][indices, :seq_length], predictions[indices, :seq_length])) def test_label_smoothing(self): epsilon = 0.1 num_labels = 12 random_logits = torch.randn(4, 5, num_labels) random_labels = torch.randint(0, num_labels, (4, 5)) loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1)) model_output = SequenceClassifierOutput(logits=random_logits) label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels) log_probs = -nn.functional.log_softmax(random_logits, dim=-1) expected_loss = (1 - epsilon) * loss + epsilon * log_probs.mean() self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss)) # With a few -100 labels random_labels[0, 1] = -100 random_labels[2, 1] = -100 random_labels[2, 3] = -100 loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1)) model_output = SequenceClassifierOutput(logits=random_logits) label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels) log_probs = -nn.functional.log_softmax(random_logits, dim=-1) # Mask the log probs with the -100 labels log_probs[0, 1] = 0.0 log_probs[2, 1] = 0.0 log_probs[2, 3] = 0.0 expected_loss = (1 - epsilon) * loss + epsilon * log_probs.sum() / (num_labels * 17) self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss)) def test_group_by_length(self): # Get some inputs of random lengths lengths = torch.randint(0, 25, (100,)).tolist() # Put one bigger than the others to check it ends up in first position lengths[32] = 50 indices = list(LengthGroupedSampler(4, lengths=lengths)) # The biggest 
element should be first self.assertEqual(lengths[indices[0]], 50) # The indices should be a permutation of range(100) self.assertEqual(sorted(indices), list(range(100))) def test_group_by_length_with_dict(self): # Get some inputs of random lengths data = [] for _ in range(6): input_ids = torch.randint(0, 25, (100,)).tolist() data.append({"input_ids": input_ids}) # Put one bigger than the others to check it ends up in first position data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist() indices = list(LengthGroupedSampler(4, dataset=data)) # The biggest element should be first self.assertEqual(len(data[indices[0]]["input_ids"]), 105) # The indices should be a permutation of range(6) self.assertEqual(sorted(indices), list(range(6))) def test_group_by_length_with_batch_encoding(self): # Get some inputs of random lengths data = [] for _ in range(6): input_ids = torch.randint(0, 25, (100,)).tolist() data.append(BatchEncoding({"input_ids": input_ids})) # Put one bigger than the others to check it ends up in first position data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist() indices = list(LengthGroupedSampler(4, dataset=data)) # The biggest element should be first self.assertEqual(len(data[indices[0]]["input_ids"]), 105) # The indices should be a permutation of range(6) self.assertEqual(sorted(indices), list(range(6))) def test_distributed_length_grouped(self): # Get some inputs of random lengths lengths = torch.randint(0, 25, (100,)).tolist() # Put one bigger than the others to check it ends up in first position lengths[32] = 50 indices_process_0 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=0, lengths=lengths)) indices_process_1 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=1, lengths=lengths)) # The biggest element should be first self.assertEqual(lengths[indices_process_0[0]], 50) # The indices should be a permutation of range(100) self.assertEqual(sorted(indices_process_0 + indices_process_1), list(range(100))) def test_get_parameter_names(self): model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)])) # fmt: off self.assertEqual( get_parameter_names(model, [nn.LayerNorm]), ['0.linear1.weight', '0.linear1.bias', '0.linear2.weight', '0.linear2.bias', '0.bias', '1.0.linear1.weight', '1.0.linear1.bias', '1.0.linear2.weight', '1.0.linear2.bias', '1.0.bias', '1.1.linear1.weight', '1.1.linear1.bias', '1.1.linear2.weight', '1.1.linear2.bias', '1.1.bias'] ) # fmt: on def test_distributed_sampler_with_loop(self): batch_size = 16 for length in [23, 64, 123]: dataset = list(range(length)) shard1 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=0) shard2 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=1) # Set seeds shard1.set_epoch(0) shard2.set_epoch(0) # Sample samples1 = list(shard1) samples2 = list(shard2) self.assertTrue(len(samples1) % batch_size == 0) self.assertTrue(len(samples2) % batch_size == 0) total = [] for sample1, sample2 in zip(samples1, samples2): total += [sample1, sample2] self.assertEqual(set(total[:length]), set(dataset)) self.assertEqual(set(total[length:]), set(total[: (len(total) - length)])) def test_sequential_distributed_sampler(self): batch_size = 16 for length in [23, 64, 123]: dataset = list(range(length)) shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0) shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1) # Sample samples1 = list(shard1) samples2 = list(shard2) total = samples1 + samples2 
self.assertListEqual(total[:length], dataset) self.assertListEqual(total[length:], dataset[: (len(total) - length)]) # With a batch_size passed shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0, batch_size=batch_size) shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1, batch_size=batch_size) # Sample samples1 = list(shard1) samples2 = list(shard2) self.assertTrue(len(samples1) % batch_size == 0) self.assertTrue(len(samples2) % batch_size == 0) total = samples1 + samples2 self.assertListEqual(total[:length], dataset) self.assertListEqual(total[length:], dataset[: (len(total) - length)]) def check_iterable_dataset_shard(self, dataset, batch_size, drop_last, num_processes=2, epoch=0): # Set the seed for the base dataset to get the proper reference. dataset.generator.manual_seed(epoch) reference = list(dataset) shards = [ IterableDatasetShard( dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] for shard in shards: shard.set_epoch(epoch) shard_lists = [list(shard) for shard in shards] for shard in shard_lists: # All shards have a number of samples that is a round multiple of batch size self.assertTrue(len(shard) % batch_size == 0) # All shards have the same number of samples self.assertEqual(len(shard), len(shard_lists[0])) for shard in shards: # All shards know the total number of samples self.assertEqual(shard.num_examples, len(reference)) observed = [] for idx in range(0, len(shard_lists[0]), batch_size): for shard in shard_lists: observed += shard[idx : idx + batch_size] # If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of # batch_size if not drop_last: while len(reference) < len(observed): reference += reference self.assertListEqual(observed, reference[: len(observed)]) # Check equivalence between IterableDataset and ShardSampler dataset.generator.manual_seed(epoch) reference = list(dataset) sampler_shards = [ ShardSampler( reference, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] for shard, sampler_shard in zip(shard_lists, sampler_shards): self.assertListEqual(shard, list(sampler_shard)) def test_iterable_dataset_shard(self): dataset = RandomIterableDataset() self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0) self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=2, epoch=0) self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=3, epoch=42) self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=3, epoch=42) def test_iterable_dataset_shard_with_length(self): sampler_shards = [ IterableDatasetShard(list(range(100)), batch_size=4, drop_last=True, num_processes=2, process_index=i) for i in range(2) ] # Build expected shards: each process will have batches of size 4 until there is not enough elements to # form two full batches (so we stop at 96 = (100 // (4 * 2)) * 4) expected_shards = [[], []] current_shard = 0 for i in range(0, 96, 4): expected_shards[current_shard].extend(list(range(i, i + 4))) current_shard = 1 - current_shard self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards) self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards]) sampler_shards = [ IterableDatasetShard(list(range(100)), batch_size=4, drop_last=False, num_processes=2, process_index=i) for i in 
range(2) ] # When drop_last=False, we get two last full batches by looping back to the beginning. expected_shards[0].extend(list(range(96, 100))) expected_shards[1].extend(list(range(0, 4))) self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards) self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards]) def check_shard_sampler(self, dataset, batch_size, drop_last, num_processes=2): shards = [ ShardSampler( dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] shard_lists = [list(shard) for shard in shards] for shard in shard_lists: # All shards have a number of samples that is a round multiple of batch size self.assertTrue(len(shard) % batch_size == 0) # All shards have the same number of samples self.assertEqual(len(shard), len(shard_lists[0])) observed = [] for idx in range(0, len(shard_lists[0]), batch_size): for shard in shard_lists: observed += shard[idx : idx + batch_size] # If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of # batch_size reference = copy.copy(dataset) if not drop_last: while len(reference) < len(observed): reference += reference self.assertListEqual(observed, reference[: len(observed)]) def test_shard_sampler(self): for n_elements in [64, 123]: dataset = list(range(n_elements)) self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=2) self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=2) self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=3) self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=3) @require_accelerate def test_executable_batch_size(self): batch_sizes = [] @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=True) def mock_training_loop_function(batch_size): nonlocal batch_sizes batch_sizes.append(batch_size) if batch_size > 16: raise RuntimeError("CUDA out of memory.") mock_training_loop_function() self.assertEqual(batch_sizes, [64, 32, 16]) @require_accelerate def test_executable_batch_size_no_search(self): batch_sizes = [] @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False) def mock_training_loop_function(batch_size): nonlocal batch_sizes batch_sizes.append(batch_size) mock_training_loop_function() self.assertEqual(batch_sizes, [64]) @require_accelerate def test_executable_batch_size_with_error(self): @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False) def mock_training_loop_function(batch_size): raise RuntimeError("CUDA out of memory.") with self.assertRaises(RuntimeError) as cm: mock_training_loop_function() self.assertEqual("CUDA out of memory", cm.args[0]) def test_pad_and_concatenate_with_1d(self): """Tests whether pad_and_concatenate works with scalars.""" array1 = 1.0 array2 = 2.0 result = numpy_pad_and_concatenate(array1, array2) self.assertTrue(np.array_equal(np.array([1.0, 2.0]), result)) tensor1 = torch.tensor(1.0) tensor2 = torch.tensor(2.0) result = torch_pad_and_concatenate(tensor1, tensor2) self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0]))) def test_remove_columns_collator(self): class MockLogger: def __init__(self) -> None: self.called = 0 def info(self, msg): self.called += 1 self.last_msg = msg data_batch = [ {"col1": 1, "col2": 2, "col3": 3}, {"col1": 1, "col2": 2, "col3": 3}, ] logger = MockLogger() remove_columns_collator = RemoveColumnsCollator( default_data_collator, ["col1", 
"col2"], logger, "model", "training" ) self.assertNotIn("col3", remove_columns_collator(data_batch)) # check that the logging message is printed out only once remove_columns_collator(data_batch) remove_columns_collator(data_batch) self.assertEqual(logger.called, 1) self.assertIn("col3", logger.last_msg)
21,726
42.454
290
py
transformers
transformers-main/tests/trainer/test_trainer_seq2seq.py
# coding=utf-8 # Copyright 2020 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class Seq2seqTrainerTester(TestCasePlus): @slow @require_torch def test_finetune_bert2bert(self): bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny") tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size bert2bert.config.eos_token_id = tokenizer.sep_token_id bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id bert2bert.config.max_length = 128 train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]") val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]") train_dataset = train_dataset.select(range(32)) val_dataset = val_dataset.select(range(16)) batch_size = 4 def _map_to_encoder_decoder_inputs(batch): # Tokenizer will automatically set [BOS] <text> [EOS] inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512) outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128) batch["input_ids"] = inputs.input_ids batch["attention_mask"] = inputs.attention_mask batch["decoder_input_ids"] = outputs.input_ids batch["labels"] = outputs.input_ids.copy() batch["labels"] = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] batch["decoder_attention_mask"] = outputs.attention_mask assert all(len(x) == 512 for x in inputs.input_ids) assert all(len(x) == 128 for x in outputs.input_ids) return batch def _compute_metrics(pred): labels_ids = pred.label_ids pred_ids = pred.predictions # all unnecessary tokens are removed pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True) accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str) return {"accuracy": accuracy} # map train dataset train_dataset = train_dataset.map( _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], ) train_dataset.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], ) # same for validation dataset val_dataset = val_dataset.map( _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], ) val_dataset.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], ) output_dir = self.get_auto_remove_tmp_dir() training_args = Seq2SeqTrainingArguments( output_dir=output_dir, 
per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, ) # instantiate trainer trainer = Seq2SeqTrainer( model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, ) # start training trainer.train()
4,850
37.19685
118
py
transformers
transformers-main/tests/trainer/test_trainer_tpu.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This test is meant to be run in on an instance with TPUs like this: # # python examples/pytorch/xla_spawn.py --num_cores=8 tests/test_trainer_tpu.py # # Replace 8 with the number of TPU cores you have. # import sys from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.utils import logging logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class DummyDataset(Dataset): def __init__(self, length: int = 101): self.length = length def __len__(self): return self.length def __getitem__(self, i) -> int: return i class DummyDataCollator: def __call__(self, features): return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)} class DummyModel(nn.Module): def __init__(self): super().__init__() # Add some (unused) params otherwise DDP will complain. self.fc = nn.Linear(120, 80) def forward(self, input_ids, labels=None): if labels is not None: return torch.tensor(0.0, device=input_ids.device), input_ids else: return input_ids def main(): parser = HfArgumentParser((TrainingArguments,)) sys.argv += ["--output_dir", "./examples"] training_args = parser.parse_args_into_dataclasses()[0] logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, " f"tpu_num_cores: {training_args.tpu_num_cores}", ) # Essentially, what we want to verify in the distributed case is # that we get all samples back, in the right order. # (this is crucial for prediction for instance) for dataset_length in [1001, 256, 15]: dataset = DummyDataset(dataset_length) def compute_metrics(p: EvalPrediction) -> Dict: sequential = list(range(len(dataset))) success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential return {"success": success} trainer = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = 2 metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = None logger.info("🔥 All distributed tests successful") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
4,017
29.439394
97
py